From 5ec332c197718d105fc33ba38407e34f7849477c Mon Sep 17 00:00:00 2001
From: Mike Durnosvistov
Date: Tue, 18 Nov 2014 16:10:06 +0200
Subject: [PATCH] Replace `_` with `_LI` in all LOG.info - part 2

oslo.i18n uses different marker functions to separate the translatable
messages into different catalogs, which the translation teams can
prioritize translating. For details, please refer to:
http://docs.openstack.org/developer/oslo.i18n/guidelines.html#guidelines-for-use-in-openstack

Marker functions were still missing in a number of places.

This commit makes the following changes:
 * Add the missing marker functions
 * Use ',' instead of '%' when adding variables to log messages

Change-Id: Iaebb239ef20a0da3df1e3552baf26f412d0fcdc0
---
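Note for reviewers (not part of the commit message): below is a minimal
sketch of the logging call pattern this series converges on, assuming the
`_LI` marker from nova.i18n and the oslo-incubator logger already used in
the tree; the function and variable names are illustrative only.

    from nova.i18n import _LI
    from nova.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def report_resize(instance_uuid, flavor_name):
        # _LI() marks the message for the "info" translation catalog;
        # passing the substitution dict as a separate argument (comma,
        # not '%') defers interpolation until the record is emitted.
        LOG.info(_LI("Resizing instance %(uuid)s to flavor %(flavor)s"),
                 {'uuid': instance_uuid, 'flavor': flavor_name})

The hunks below apply that pattern mechanically: `_(` becomes `_LI(` and
any '%' interpolation inside LOG.info moves to a trailing argument.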
 nova/cells/filters/target_cell.py |   6 +-
 nova/cells/messaging.py           |   6 +-
 nova/cells/scheduler.py           |   6 +-
 nova/compute/api.py               |  31 ++++----
 nova/compute/manager.py           | 121 +++++++++++++++---------
 nova/compute/resource_tracker.py  |  12 +--
 nova/conductor/api.py             |   6 +-
 nova/db/sqlalchemy/api.py         |   6 +-
 nova/filters.py                   |   4 +-
 nova/hacking/checks.py            |  11 +--
 nova/image/download/file.py       |   4 +-
 nova/image/s3.py                  |   4 +-
 nova/wsgi.py                      |   8 +-
 13 files changed, 111 insertions(+), 114 deletions(-)

diff --git a/nova/cells/filters/target_cell.py b/nova/cells/filters/target_cell.py
index 43c81e71e5eb..be90d8014c9a 100644
--- a/nova/cells/filters/target_cell.py
+++ b/nova/cells/filters/target_cell.py
@@ -22,7 +22,7 @@ done as there's no way to know whether the full path is a valid.
 """
 from nova.cells import filters
-from nova.i18n import _
+from nova.i18n import _LI
 from nova.openstack.common import log as logging

 LOG = logging.getLogger(__name__)
@@ -55,8 +55,8 @@ class TargetCellFilter(filters.BaseCellFilter):
             # No filtering, if not authorized.
             return cells

-        LOG.info(_("Forcing direct route to %(cell_name)s because "
-                   "of 'target_cell' scheduler hint"),
+        LOG.info(_LI("Forcing direct route to %(cell_name)s because "
+                     "of 'target_cell' scheduler hint"),
                  {'cell_name': cell_name})

         scheduler = filter_properties['scheduler']
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 5bc2e31c5437..7c5c060bad44 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -49,7 +49,7 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi
 from nova import context
 from nova.db import base
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.network import model as network_model
 from nova import objects
 from nova.objects import base as objects_base
@@ -1131,8 +1131,8 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
                  **kwargs):
         projid_str = project_id is None and "" or project_id
         since_str = updated_since is None and "" or updated_since
-        LOG.info(_("Forcing a sync of instances, project_id="
-                   "%(projid_str)s, updated_since=%(since_str)s"),
+        LOG.info(_LI("Forcing a sync of instances, project_id="
+                     "%(projid_str)s, updated_since=%(since_str)s"),
                  {'projid_str': projid_str, 'since_str': since_str})
         if updated_since is not None:
             updated_since = timeutils.parse_isotime(updated_since)
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
index e19d5355b43c..82ead762fa0a 100644
--- a/nova/cells/scheduler.py
+++ b/nova/cells/scheduler.py
@@ -30,7 +30,7 @@ from nova.compute import vm_states
 from nova import conductor
 from nova.db import base
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova import objects
 from nova.objects import base as obj_base
 from nova.openstack.common import log as logging
@@ -236,8 +236,8 @@ class CellsScheduler(base.Base):
                 if i == max(0, CONF.cells.scheduler_retries):
                     raise
                 sleep_time = max(1, CONF.cells.scheduler_retry_delay)
-                LOG.info(_("No cells available when scheduling. Will "
-                           "retry in %(sleep_time)s second(s)"),
+                LOG.info(_LI("No cells available when scheduling. Will "
+                             "retry in %(sleep_time)s second(s)"),
                          {'sleep_time': sleep_time})
                 time.sleep(sleep_time)
                 continue
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 789db3b81ec5..5ebf61440f63 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -51,6 +51,7 @@ from nova import exception
 from nova import hooks
 from nova.i18n import _
 from nova.i18n import _LE
+from nova.i18n import _LI
 from nova import image
 from nova import keymgr
 from nova import network
@@ -1520,7 +1521,7 @@ class API(base.Base):

     def _delete(self, context, instance, delete_type, cb, **instance_attrs):
         if instance.disable_terminate:
-            LOG.info(_('instance termination disabled'),
+            LOG.info(_LI('instance termination disabled'),
                      instance=instance)
             return

@@ -1534,8 +1535,8 @@ class API(base.Base):
         if instance['vm_state'] in (vm_states.SHELVED,
                                     vm_states.SHELVED_OFFLOADED):
             snapshot_id = instance.system_metadata.get('shelved_image_id')
-            LOG.info(_("Working on deleting snapshot %s "
-                       "from shelved instance..."),
+            LOG.info(_LI("Working on deleting snapshot %s "
+                         "from shelved instance..."),
                      snapshot_id, instance=instance)
             try:
                 self.image_api.delete(context, snapshot_id)
@@ -1604,8 +1605,9 @@
                 and original_task_state in (
                     task_states.DELETING, task_states.SOFT_DELETING)):
-                LOG.info(_('Instance is already in deleting state, '
-                           'ignoring this request'), instance=instance)
+                LOG.info(_LI('Instance is already in deleting state, '
+                             'ignoring this request'),
+                         instance=instance)
                 quotas.rollback()
                 return
@@ -1639,18 +1641,18 @@
             try:
                 migration = objects.Migration.get_by_instance_and_status(
                     context.elevated(), instance.uuid, status)
-                LOG.info(_('Found an unconfirmed migration during delete, '
-                           'id: %(id)s, status: %(status)s') %
-                         {'id': migration.id,
-                          'status': migration.status},
-                         context=context, instance=instance)
+                LOG.info(_LI('Found an unconfirmed migration during delete, '
+                             'id: %(id)s, status: %(status)s'),
+                         {'id': migration.id,
+                          'status': migration.status},
+                         context=context, instance=instance)
                 break
             except exception.MigrationNotFoundByStatus:
                 pass

         if not migration:
-            LOG.info(_('Instance may have been confirmed during delete'),
-                     context=context, instance=instance)
+            LOG.info(_LI('Instance may have been confirmed during delete'),
+                     context=context, instance=instance)
             return

         src_host = migration.source_compute
@@ -1666,8 +1668,9 @@
         try:
             deltas = self._downsize_quota_delta(context, instance)
         except KeyError:
-            LOG.info(_('Migration %s may have been confirmed during delete') %
-                     migration.id, context=context, instance=instance)
+            LOG.info(_LI('Migration %s may have been confirmed during '
+                         'delete'),
+                     migration.id, context=context, instance=instance)
             return

         quotas = self._reserve_quota_delta(context, deltas, instance)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 56d1b6930d59..d299da9e5fdf 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -281,7 +281,8 @@ def reverts_task_state(function):
             # task is preempted. Do not clear task state in this
             # case.
             with excutils.save_and_reraise_exception():
-                LOG.info(_("Task possibly preempted: %s") % e.format_message())
+                LOG.info(_LI("Task possibly preempted: %s"),
+                         e.format_message())
         except Exception:
             with excutils.save_and_reraise_exception():
                 try:
@@ -740,9 +741,9 @@ class ComputeManager(manager.Manager):
                            'vm_state': instance.vm_state},
                           instance=instance)
                 continue
-            LOG.info(_('Deleting instance as its host ('
-                       '%(instance_host)s) is not equal to our '
-                       'host (%(our_host)s).'),
+            LOG.info(_LI('Deleting instance as its host ('
+                         '%(instance_host)s) is not equal to our '
+                         'host (%(our_host)s).'),
                      {'instance_host': instance.host,
                       'our_host': our_host}, instance=instance)
             try:
@@ -755,8 +756,8 @@ class ComputeManager(manager.Manager):
                 except exception.InstanceNotFound:
                     network_info = network_model.NetworkInfo()
                     bdi = {}
-                    LOG.info(_('Instance has been marked deleted already, '
-                               'removing it from the hypervisor.'),
+                    LOG.info(_LI('Instance has been marked deleted already, '
+                                 'removing it from the hypervisor.'),
                              instance=instance)
                     # always destroy disks if the instance was deleted
                     destroy_disks = True
@@ -899,9 +900,9 @@ class ComputeManager(manager.Manager):

         if instance.task_state == task_states.DELETING:
             try:
-                LOG.info(_('Service started deleting the instance during '
-                           'the previous run, but did not finish. Restarting '
-                           'the deletion now.'), instance=instance)
+                LOG.info(_LI('Service started deleting the instance during '
+                             'the previous run, but did not finish. Restarting'
+                             ' the deletion now.'), instance=instance)
                 instance.obj_load_attr('metadata')
                 instance.obj_load_attr('system_metadata')
                 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -998,8 +999,8 @@ class ComputeManager(manager.Manager):
                 LOG.exception(_LE('Failed to revert crashed migration'),
                               instance=instance)
             finally:
-                LOG.info(_('Instance found in migrating state during '
-                           'startup. Resetting task_state'),
+                LOG.info(_LI('Instance found in migrating state during '
+                             'startup. Resetting task_state'),
                          instance=instance)
                 instance.task_state = None
                 instance.save()
@@ -1020,7 +1021,7 @@ class ComputeManager(manager.Manager):
                           instance=instance)

             if expect_running and CONF.resume_guests_state_on_host_boot:
-                LOG.info(_('Rebooting instance after nova-compute restart.'),
+                LOG.info(_LI('Rebooting instance after nova-compute restart.'),
                          instance=instance)

                 block_device_info = \
@@ -1069,8 +1070,8 @@ class ComputeManager(manager.Manager):
         return retry_reboot, reboot_type

     def handle_lifecycle_event(self, event):
-        LOG.info(_("VM %(state)s (Lifecycle Event)") %
-                 {'state': event.get_name()},
+        LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
+                 {'state': event.get_name()},
                  instance_uuid=event.get_instance_uuid())
         context = nova.context.get_admin_context(read_deleted='yes')
         instance = objects.Instance.get_by_uuid(context,
@@ -2507,7 +2508,7 @@ class ComputeManager(manager.Manager):
         try:
             self._delete_instance(context, instance, bdms, quotas)
         except exception.InstanceNotFound:
-            LOG.info(_("Instance disappeared during terminate"),
+            LOG.info(_LI("Instance disappeared during terminate"),
                      instance=instance)
         except Exception:
             # As we're trying to delete always go to Error if something
@@ -2735,12 +2736,12 @@
                                     " storage"))

             if on_shared_storage:
-                LOG.info(_('disk on shared storage, recreating using'
-                           ' existing disk'))
+                LOG.info(_LI('disk on shared storage, recreating using'
+                             ' existing disk'))
             else:
                 image_ref = orig_image_ref = instance.image_ref
-                LOG.info(_("disk not on shared storage, rebuilding from:"
-                           " '%s'") % str(image_ref))
+                LOG.info(_LI("disk not on shared storage, rebuilding from:"
+                             " '%s'"), str(image_ref))

         # NOTE(mriedem): On a recreate (evacuate), we need to update
         # the instance's host and node properties to reflect it's
@@ -2859,7 +2860,7 @@
         # compute-manager.
         #
         # API-detach
-        LOG.info(_("Detaching from volume api: %s") % volume_id)
+        LOG.info(_LI("Detaching from volume api: %s"), volume_id)
         volume = self.volume_api.get(context, volume_id)
         self.volume_api.check_detach(context, volume)
         self.volume_api.begin_detaching(context, volume_id)
@@ -3375,8 +3376,8 @@
             return

         if migration.status == 'confirmed':
-            LOG.info(_("Migration %s is already confirmed") %
-                     migration_id, context=context, instance=instance)
+            LOG.info(_LI("Migration %s is already confirmed"),
+                     migration_id, context=context, instance=instance)
             quotas.rollback()
             return
         elif migration.status not in ('finished', 'confirming'):
@@ -3395,8 +3396,8 @@
                 context, instance.uuid,
                 expected_attrs=expected_attrs)
         except exception.InstanceNotFound:
-            LOG.info(_("Instance is not found during confirmation"),
-                     context=context, instance=instance)
+            LOG.info(_LI("Instance is not found during confirmation"),
+                     context=context, instance=instance)
             quotas.rollback()
             return
@@ -3577,7 +3578,7 @@
                                                         migration_p)

         # if the original vm state was STOPPED, set it back to STOPPED
-        LOG.info(_("Updating instance to original state: '%s'") %
+        LOG.info(_LI("Updating instance to original state: '%s'"),
                  old_vm_state)
         if power_on:
             instance.vm_state = vm_states.ACTIVE
@@ -5038,7 +5039,7 @@
         required for live migration without shared storage

         """
-        LOG.info(_('_post_live_migration() is started..'),
+        LOG.info(_LI('_post_live_migration() is started..'),
                  instance=instance)

         bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -5118,11 +5119,11 @@
         self._notify_about_instance_usage(ctxt, instance,
                                           "live_migration._post.end",
                                           network_info=network_info)
-        LOG.info(_('Migrating instance to %s finished successfully.'),
+        LOG.info(_LI('Migrating instance to %s finished successfully.'),
                  dest, instance=instance)
-        LOG.info(_("You may see the error \"libvirt: QEMU error: "
-                   "Domain not found: no domain with matching name.\" "
-                   "This error can be safely ignored."),
+        LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
+                     "Domain not found: no domain with matching name.\" "
+                     "This error can be safely ignored."),
                  instance=instance)

         if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
@@ -5145,7 +5146,7 @@
         :param block_migration: if true, prepare for block migration

         """
-        LOG.info(_('Post operation of migration started'),
+        LOG.info(_LI('Post operation of migration started'),
                  instance=instance)

         # NOTE(tr3buchet): setup networks on destination host
@@ -5408,8 +5409,8 @@
                 confirm_window=CONF.resize_confirm_window)

         if migrations_info["migration_count"] > 0:
-            LOG.info(_("Found %(migration_count)d unconfirmed migrations "
-                       "older than %(confirm_window)d seconds"),
+            LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
+                         "older than %(confirm_window)d seconds"),
                      migrations_info)

         def _set_migration_to_error(migration, reason, **kwargs):
@@ -5422,8 +5423,8 @@

         for migration in migrations:
             instance_uuid = migration.instance_uuid
-            LOG.info(_("Automatically confirming migration "
-                       "%(migration_id)s for instance %(instance_uuid)s"),
+            LOG.info(_LI("Automatically confirming migration "
+                         "%(migration_id)s for instance %(instance_uuid)s"),
                      {'migration_id': migration.id,
                       'instance_uuid': instance_uuid})
             expected_attrs = ['metadata', 'system_metadata']
@@ -5476,8 +5477,8 @@
                 self.compute_api.confirm_resize(context,
                                                 instance,
                                                 migration=migration)
             except Exception as e:
-                LOG.info(_("Error auto-confirming resize: %s. "
-                           "Will retry later."),
+                LOG.info(_LI("Error auto-confirming resize: %s. "
+                             "Will retry later."),
                          e, instance=instance)

     @periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
@@ -5524,10 +5525,10 @@
         num_instances = len(instances)
         errors = 0
         successes = 0
-        LOG.info(_("Running instance usage audit for"
-                   " host %(host)s from %(begin_time)s to "
-                   "%(end_time)s. %(number_instances)s"
-                   " instances."),
+        LOG.info(_LI("Running instance usage audit for"
+                     " host %(host)s from %(begin_time)s to "
+                     "%(end_time)s. %(number_instances)s"
+                     " instances."),
                  dict(host=self.host,
                       begin_time=begin,
                       end_time=end,
@@ -5572,7 +5573,7 @@
         if (curr_time - self._last_bw_usage_poll >
                 CONF.bandwidth_poll_interval):
             self._last_bw_usage_poll = curr_time
-            LOG.info(_("Updating bandwidth usage cache"))
+            LOG.info(_LI("Updating bandwidth usage cache"))
             cells_update_interval = CONF.cells.bandwidth_update_interval
             if (cells_update_interval > 0 and
                     curr_time - self._last_bw_usage_cell_update >
@@ -5794,11 +5795,11 @@
             # is just in the process of migrating to another host.
             # This implies that the compute source must relinquish
             # control to the compute destination.
-            LOG.info(_("During the sync_power process the "
-                       "instance has moved from "
-                       "host %(src)s to host %(dst)s") %
-                     {'src': db_instance.host,
-                      'dst': self.host},
+            LOG.info(_LI("During the sync_power process the "
+                         "instance has moved from "
+                         "host %(src)s to host %(dst)s"),
+                     {'src': db_instance.host,
+                      'dst': self.host},
                      instance=db_instance)
             return
         elif db_instance.task_state is not None:
@@ -5807,8 +5808,8 @@
             # but the actual VM has not showed up on the hypervisor
             # yet. In this case, let's allow the loop to continue
             # and run the state sync in a later round
-            LOG.info(_("During sync_power_state the instance has a "
-                       "pending task (%(task)s). Skip."),
+            LOG.info(_LI("During sync_power_state the instance has a "
+                         "pending task (%(task)s). Skip."),
                     {'task': db_instance.task_state},
                     instance=db_instance)
             return
@@ -5953,7 +5954,7 @@
             if self._deleted_old_enough(instance, interval):
                 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                     context, instance.uuid)
-                LOG.info(_('Reclaiming deleted instance'), instance=instance)
+                LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
                 try:
                     self._delete_instance(context, instance, bdms, quotas)
                 except Exception as e:
@@ -6040,10 +6041,10 @@
                          instance['name'], instance=instance)
             elif action == 'shutdown':
-                LOG.info(_("Powering off instance with name label "
-                           "'%s' which is marked as "
-                           "DELETED but still present on host."),
-                         instance['name'], instance=instance)
+                LOG.info(_LI("Powering off instance with name label "
+                             "'%s' which is marked as "
+                             "DELETED but still present on host."),
+                         instance['name'], instance=instance)
                 try:
                     try:
                         # disable starting the instance
@@ -6058,9 +6059,9 @@
                         LOG.warn(msg, instance=instance, exc_info=True)
             elif action == 'reap':
-                LOG.info(_("Destroying instance with name label "
-                           "'%s' which is marked as "
-                           "DELETED but still present on host."),
+                LOG.info(_LI("Destroying instance with name label "
+                             "'%s' which is marked as "
+                             "DELETED but still present on host."),
                          instance['name'], instance=instance)
                 self.instance_events.clear_events_for_instance(instance)
                 try:
@@ -6104,8 +6105,8 @@
             with excutils.save_and_reraise_exception():
                 if quotas:
                     quotas.rollback()
-                LOG.info(_("Setting instance back to %(state)s after: "
-                           "%(error)s") %
+                LOG.info(_LI("Setting instance back to %(state)s after: "
+                             "%(error)s"),
                          {'state': instance_state, 'error': error},
                          instance_uuid=instance_uuid)
                 self._instance_update(context, instance_uuid,
@@ -6114,7 +6115,7 @@
         except exception.InstanceFaultRollback as error:
             if quotas:
                 quotas.rollback()
-            LOG.info(_("Setting instance back to ACTIVE after: %s"),
+            LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
                      error, instance_uuid=instance_uuid)
             self._instance_update(context, instance_uuid,
                                   vm_state=vm_states.ACTIVE,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 423143d3d416..fa2d5743c011 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -32,7 +32,7 @@ from nova.compute import task_states
 from nova.compute import vm_states
 from nova import conductor
 from nova import exception
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova import objects
 from nova.objects import base as obj_base
 from nova.openstack.common import log as logging
@@ -400,14 +400,16 @@ class ResourceTracker(object):
             self._create(context, resources)
             if self.pci_tracker:
                 self.pci_tracker.set_compute_node_id(self.compute_node['id'])
-            LOG.info(_('Compute_service record created for %(host)s:%(node)s')
-                     % {'host': self.host, 'node': self.nodename})
+            LOG.info(_LI('Compute_service record created for '
+                         '%(host)s:%(node)s'),
+                     {'host': self.host, 'node': self.nodename})
         else:
             # just update the record:
             self._update(context, resources)
-            LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
-                     % {'host': self.host, 'node': self.nodename})
+            LOG.info(_LI('Compute_service record updated for '
+                         '%(host)s:%(node)s'),
+                     {'host': self.host, 'node': self.nodename})

     def _write_ext_resources(self, resources):
         resources['stats'] = {}
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index b7e3257a5251..af8729dc3cfb 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -20,7 +20,7 @@ from oslo import messaging
 from nova import baserpc
 from nova.conductor import manager
 from nova.conductor import rpcapi
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.openstack.common import log as logging
 from nova import utils
@@ -312,8 +312,8 @@ class API(LocalAPI):
                 self.base_rpcapi.ping(context, '1.21 GigaWatts',
                                       timeout=timeout)
                 if has_timedout:
-                    LOG.info(_('nova-conductor connection '
-                               'established successfully'))
+                    LOG.info(_LI('nova-conductor connection '
+                                 'established successfully'))
                 break
             except messaging.MessagingTimeout:
                 has_timedout = True
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a600f2844d40..55865c5b5045 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -5119,9 +5119,9 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
             rd_bytes < current_usage['curr_read_bytes'] or
             wr_req < current_usage['curr_writes'] or
             wr_bytes < current_usage['curr_write_bytes']):
-            LOG.info(_("Volume(%s) has lower stats then what is in "
-                       "the database. Instance must have been rebooted "
-                       "or crashed. Updating totals.") % id)
+            LOG.info(_LI("Volume(%s) has lower stats then what is in "
+                         "the database. Instance must have been rebooted "
+                         "or crashed. Updating totals."), id)
             if not update_totals:
                 values['tot_reads'] = (models.VolumeUsage.tot_reads +
                                        current_usage['curr_reads'])
diff --git a/nova/filters.py b/nova/filters.py
index 1ecd98824937..d0b56b5ea665 100644
--- a/nova/filters.py
+++ b/nova/filters.py
@@ -17,7 +17,7 @@ Filter support
 """
-from nova.i18n import _
+from nova.i18n import _LI
 from nova import loadables
 from nova.openstack.common import log as logging
@@ -81,7 +81,7 @@ class BaseFilterHandler(loadables.BaseLoader):
                 return
             list_objs = list(objs)
             if not list_objs:
-                LOG.info(_("Filter %s returned 0 hosts"), cls_name)
+                LOG.info(_LI("Filter %s returned 0 hosts"), cls_name)
                 break
             LOG.debug("Filter %(cls_name)s returned "
                       "%(obj_len)d host(s)",
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index 849e884c43df..a10bdb9271bd 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -296,16 +296,7 @@ def validate_log_translations(logical_line, physical_line, filename):
     # Translations are not required in the test directory
     # and the Xen utilities
     if ("nova/tests" in filename or
-        "plugins/xenserver/xenapi/etc/xapi.d" in filename or
-        # TODO(Mike_D):Needs to be remove with:
-        # Iaebb239ef20a0da3df1e3552baf26f412d0fcdc0
-        "nova/compute" in filename or
-        "nova/cells" in filename or
-        "nova/image" in filename or
-        "nova/conductor" in filename or
-        "nova/wsgi.py" in filename or
-        "nova/filters.py" in filename or
-        "nova/db" in filename):
+        "plugins/xenserver/xenapi/etc/xapi.d" in filename):
         return
     if pep8.noqa(physical_line):
         return
diff --git a/nova/image/download/file.py b/nova/image/download/file.py
index a416835c8764..a3acf9f7fb62 100644
--- a/nova/image/download/file.py
+++ b/nova/image/download/file.py
@@ -18,7 +18,7 @@ import logging
 from oslo.config import cfg

 from nova import exception
-from nova.i18n import _
+from nova.i18n import _, _LI
 import nova.image.download.base as xfer_base
 import nova.virt.libvirt.utils as lv_utils
@@ -161,7 +161,7 @@ class FileTransfer(xfer_base.TransferBase):
                                    glance_mountpoint, url_parts.path)
         lv_utils.copy_image(source_file, dst_file)
-        LOG.info(_('Copied %(source_file)s using %(module_str)s') %
+        LOG.info(_LI('Copied %(source_file)s using %(module_str)s'),
                  {'source_file': source_file, 'module_str': str(self)})
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 9ea6f770088d..fbd512c28d1b 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -33,7 +33,7 @@ from nova.api.ec2 import ec2utils
 import nova.cert.rpcapi
 from nova.compute import arch
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.image import glance
 from nova.openstack.common import log as logging
 from nova import utils
@@ -383,7 +383,7 @@ class S3ImageService(object):
                     shutil.rmtree(image_path)
             except exception.ImageNotFound:
-                LOG.info(_("Image %s was deleted underneath us"), image_uuid)
+                LOG.info(_LI("Image %s was deleted underneath us"), image_uuid)
                 return

         eventlet.spawn_n(delayed_create)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 4998cbfb30a1..e79ba9892aa7 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -35,7 +35,7 @@ import webob.dec
 import webob.exc

 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.openstack.common import log as logging

 wsgi_opts = [
@@ -135,7 +135,7 @@ class Server(object):
                 raise
         (self.host, self.port) = self._socket.getsockname()[0:2]
-        LOG.info(_("%(name)s listening on %(host)s:%(port)s"),
+        LOG.info(_LI("%(name)s listening on %(host)s:%(port)s"),
                  {'name': self.name, 'host': self.host, 'port': self.port})

     def start(self):
@@ -238,7 +238,7 @@ class Server(object):
         :returns: None

         """
-        LOG.info(_("Stopping WSGI server."))
+        LOG.info(_LI("Stopping WSGI server."))

         if self._server is not None:
             # Resize pool to stop new requests from being processed
@@ -258,7 +258,7 @@ class Server(object):
                 self._pool.waitall()
                 self._server.wait()
         except greenlet.GreenletExit:
-            LOG.info(_("WSGI server has stopped."))
+            LOG.info(_LI("WSGI server has stopped."))


 class Request(webob.Request):
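
Note (illustrative, not part of the patch): with the directory exemptions
removed from validate_log_translations above, the hacking check now covers
these trees. A minimal sketch of that kind of flake8/hacking check follows;
the regex, function name, and error code here are assumptions for
illustration, not nova's exact implementation.

    import re

    # Flag LOG.info calls whose message is a bare string or wrapped in _()
    # instead of the _LI() marker function (illustrative pattern only).
    _log_info_no_marker = re.compile(r"LOG\.info\(\s*('|\"|_\()")

    def check_log_info_translation(logical_line, filename):
        """N328-style check: LOG.info messages should use _LI()."""
        if "nova/tests" in filename:
            return
        if _log_info_no_marker.search(logical_line):
            yield (0, "N328: LOG.info messages require translation with _LI()")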