diff --git a/nova/network/neutron.py b/nova/network/neutron.py index 745d522f8c4e..f3ac861be9e2 100644 --- a/nova/network/neutron.py +++ b/nova/network/neutron.py @@ -528,7 +528,7 @@ class API(base.Base): # need resource allocation manipulation in placement but might also # need a new scheduling if resource on this host is not available. if port.get(constants.RESOURCE_REQUEST, None): - msg = _( + msg = ( "The auto-created port %(port_id)s is being deleted due " "to its network having QoS policy.") LOG.info(msg, {'port_id': port_id}) diff --git a/nova/privsep/libvirt.py b/nova/privsep/libvirt.py index 9f4bb3b0503c..b7247c5cd096 100644 --- a/nova/privsep/libvirt.py +++ b/nova/privsep/libvirt.py @@ -26,7 +26,6 @@ from oslo_log import log as logging from oslo_utils import units from oslo_utils import uuidutils -from nova.i18n import _ import nova.privsep @@ -167,7 +166,7 @@ def readpty(path): return f.read() - except Exception as e: + except Exception as exc: # NOTE(mikal): dear internet, I see you looking at me with your # judging eyes. There's a story behind why we do this. You see, the # previous implementation did this: @@ -186,8 +185,9 @@ def readpty(path): # # Therefore for now we log the errors, but keep on rolling. Volunteers # to help clean this up are welcome and will receive free beverages. 
- LOG.info(_('Ignored error while reading from instance console ' - 'pty: %s'), e) + LOG.info( + 'Ignored error while reading from instance console pty: %s', exc + ) return '' diff --git a/nova/scheduler/request_filter.py b/nova/scheduler/request_filter.py index de05d3d1e46d..58aea774880d 100644 --- a/nova/scheduler/request_filter.py +++ b/nova/scheduler/request_filter.py @@ -181,8 +181,9 @@ def require_image_type_support(ctxt, request_spec): disk_format = request_spec.image.disk_format trait_name = 'COMPUTE_IMAGE_TYPE_%s' % disk_format.upper() if not hasattr(os_traits, trait_name): - LOG.error(('Computed trait name %r is not valid; ' - 'is os-traits up to date?'), trait_name) + LOG.error( + 'Computed trait name %r is not valid; is os-traits up to date?', + trait_name) return False request_spec.root_required.add(trait_name) @@ -218,8 +219,8 @@ def transform_image_metadata(ctxt, request_spec): '-', '_').upper() trait_name = f'{prefix}_{value}' if not hasattr(os_traits, trait_name): - LOG.error(('Computed trait name %r is not valid; ' - 'is os-traits up to date?'), trait_name) + LOG.error('Computed trait name %r is not valid; ' + 'is os-traits up to date?', trait_name) return False trait_names.append(trait_name) diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index c79ae77371a0..79d629f2830c 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -416,7 +416,7 @@ def teardown_container(container_dir, container_root_device=None): LOG.debug('No release necessary for block device %s', container_root_device) except Exception: - LOG.exception(_('Failed to teardown container filesystem')) + LOG.exception('Failed to teardown container filesystem') def clean_lxc_namespace(container_dir): @@ -429,7 +429,7 @@ def clean_lxc_namespace(container_dir): img = _DiskImage(image=None, mount_dir=container_dir) img.umount() except Exception: - LOG.exception(_('Failed to umount container filesystem')) + LOG.exception('Failed to umount container filesystem') def 
inject_data_into_fs(fs, key, net, metadata, admin_password, files, diff --git a/nova/virt/driver.py b/nova/virt/driver.py index a38809b0da93..61abbe186f8b 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -1894,7 +1894,7 @@ def load_compute_driver(virtapi, compute_driver=None): return driver raise ValueError() except ImportError: - LOG.exception(_("Unable to load the virtualization driver")) + LOG.exception("Unable to load the virtualization driver") sys.exit(1) except ValueError: LOG.exception("Compute driver '%s' from 'nova.virt' is not of type " diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py index 05b31cbbf35a..63f9e8d708bf 100644 --- a/nova/virt/hyperv/snapshotops.py +++ b/nova/virt/hyperv/snapshotops.py @@ -22,7 +22,6 @@ from os_win import utilsfactory from oslo_log import log as logging from nova.compute import task_states -from nova.i18n import _ from nova.image import glance from nova.virt.hyperv import pathutils @@ -111,7 +110,7 @@ class SnapshotOps(object): LOG.debug("Removing snapshot %s", image_id) self._vmutils.remove_vm_snapshot(snapshot_path) except Exception: - LOG.exception(_('Failed to remove snapshot for VM %s'), + LOG.exception('Failed to remove snapshot for VM %s', instance_name, instance=instance) if export_dir: LOG.debug('Removing directory: %s', export_dir) diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 356e6fc07eb6..8f38588b4fb1 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -745,8 +745,7 @@ class VMOps(object): self._delete_disk_files(instance_name) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_('Failed to destroy instance: %s'), - instance_name) + LOG.exception('Failed to destroy instance: %s', instance_name) def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py index 2c53e9b44cf6..d81d34ae9fd8 
100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -77,8 +77,8 @@ class VolumeOps(object): tries_left -= 1 if not tries_left: LOG.exception( - _("Failed to attach volume %(connection_info)s " - "to instance %(instance_name)s. "), + "Failed to attach volume %(connection_info)s " + "to instance %(instance_name)s.", {'connection_info': strutils.mask_dict_password(connection_info), 'instance_name': instance_name}) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 6b9ba87df7ff..5c0facb62850 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1825,17 +1825,18 @@ class LibvirtDriver(driver.ComputeDriver): except libvirt.libvirtError as ex: with excutils.save_and_reraise_exception(): if 'Incorrect number of padding bytes' in six.text_type(ex): - LOG.warning(_('Failed to attach encrypted volume due to a ' - 'known Libvirt issue, see the following bug ' - 'for details: ' - 'https://bugzilla.redhat.com/1447297')) + LOG.warning( + 'Failed to attach encrypted volume due to a known ' + 'Libvirt issue, see the following bug for details: ' + 'https://bugzilla.redhat.com/1447297' + ) else: - LOG.exception(_('Failed to attach volume at mountpoint: ' - '%s'), mountpoint, instance=instance) + LOG.exception('Failed to attach volume at mountpoint: %s', + mountpoint, instance=instance) self._disconnect_volume(context, connection_info, instance, encryption=encryption) except Exception: - LOG.exception(_('Failed to attach volume at mountpoint: %s'), + LOG.exception('Failed to attach volume at mountpoint: %s', mountpoint, instance=instance) with excutils.save_and_reraise_exception(): self._disconnect_volume(context, connection_info, instance, @@ -2468,7 +2469,7 @@ class LibvirtDriver(driver.ComputeDriver): image_file) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to snapshot image")) + LOG.exception("Failed to snapshot image") failed_snap = metadata.pop('location', 
None) if failed_snap: failed_snap = {'url': str(failed_snap)} @@ -2677,8 +2678,8 @@ class LibvirtDriver(driver.ComputeDriver): snapshot_id, status) except Exception: - LOG.exception(_('Failed to send updated snapshot status ' - 'to volume service.')) + LOG.exception('Failed to send updated snapshot status ' + 'to volume service.') def _volume_snapshot_create(self, context, instance, guest, volume_id, new_file): @@ -2789,8 +2790,8 @@ class LibvirtDriver(driver.ComputeDriver): # If the image says that quiesce is required then we fail. if self._requires_quiesce(image_meta): raise - LOG.exception(_('Unable to create quiesced VM snapshot, ' - 'attempting again with quiescing disabled.'), + LOG.exception('Unable to create quiesced VM snapshot, ' + 'attempting again with quiescing disabled.', instance=instance) except (exception.InstanceQuiesceNotSupported, exception.QemuGuestAgentNotEnabled) as err: @@ -2804,8 +2805,8 @@ class LibvirtDriver(driver.ComputeDriver): guest.snapshot(snapshot, no_metadata=True, disk_only=True, reuse_ext=True, quiesce=False) except libvirt.libvirtError: - LOG.exception(_('Unable to create VM snapshot, ' - 'failing volume_snapshot operation.'), + LOG.exception('Unable to create VM snapshot, ' + 'failing volume_snapshot operation.', instance=instance) raise @@ -2855,9 +2856,8 @@ class LibvirtDriver(driver.ComputeDriver): volume_id, create_info['new_file']) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_('Error occurred during ' - 'volume_snapshot_create, ' - 'sending error status to Cinder.'), + LOG.exception('Error occurred during volume_snapshot_create, ' + 'sending error status to Cinder.', instance=instance) self._volume_snapshot_update_status( context, snapshot_id, 'error') @@ -3110,9 +3110,8 @@ class LibvirtDriver(driver.ComputeDriver): snapshot_id, delete_info=delete_info) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_('Error occurred during ' - 'volume_snapshot_delete, ' - 
'sending error status to Cinder.'), + LOG.exception('Error occurred during volume_snapshot_delete, ' + 'sending error status to Cinder.', instance=instance) self._volume_snapshot_update_status( context, snapshot_id, 'error_deleting') @@ -3385,11 +3384,11 @@ class LibvirtDriver(driver.ComputeDriver): elif error_code == libvirt.VIR_ERR_OPERATION_INVALID: raise exception.InstanceNotRunning(instance_id=instance.uuid) - LOG.exception(_('Error from libvirt while injecting an NMI to ' - '%(instance_uuid)s: ' - '[Error Code %(error_code)s] %(ex)s'), - {'instance_uuid': instance.uuid, - 'error_code': error_code, 'ex': ex}) + LOG.exception( + 'Error from libvirt while injecting an NMI to ' + '%(instance_uuid)s: [Error Code %(error_code)s] %(ex)s', + {'instance_uuid': instance.uuid, + 'error_code': error_code, 'ex': ex}) raise def suspend(self, context, instance): diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py index 3ebbdbcf9138..b27babc74f66 100644 --- a/nova/virt/libvirt/host.py +++ b/nova/virt/libvirt/host.py @@ -190,7 +190,7 @@ class Host(object): try: handler() except Exception: - LOG.exception(_('Exception handling connection event')) + LOG.exception('Exception handling connection event') finally: self._conn_event_handler_queue.task_done() @@ -503,10 +503,8 @@ class Host(object): try: conn = self._get_connection() except libvirt.libvirtError as ex: - LOG.exception(_("Connection to libvirt failed: %s"), ex) - payload = dict(ip=CONF.my_ip, - method='_connect', - reason=ex) + LOG.exception("Connection to libvirt failed: %s", ex) + payload = {'ip': CONF.my_ip, 'method': '_connect', 'reason': ex} ctxt = nova_context.get_admin_context() rpc.get_notifier('compute').error(ctxt, 'compute.libvirt.error', diff --git a/nova/virt/libvirt/storage/rbd_utils.py b/nova/virt/libvirt/storage/rbd_utils.py index 8a4782f5a3c7..97440b81e444 100644 --- a/nova/virt/libvirt/storage/rbd_utils.py +++ b/nova/virt/libvirt/storage/rbd_utils.py @@ -79,7 +79,7 @@ class 
RBDVolumeProxy(object): driver._disconnect_from_rados(client, ioctx) except rbd.Error: with excutils.save_and_reraise_exception(): - LOG.exception(_("error opening rbd image %s"), name) + LOG.exception("error opening rbd image %s", name) driver._disconnect_from_rados(client, ioctx) self.driver = driver diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 2bd695d48482..dacb7252258b 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -629,7 +629,7 @@ class LibvirtGenericVIFDriver(object): vnic_mac, device_id, fabric, network_model.VIF_TYPE_IB_HOSTDEV, pci_slot) except processutils.ProcessExecutionError: - LOG.exception(_("Failed while plugging ib hostdev vif"), + LOG.exception("Failed while plugging ib hostdev vif", instance=instance) def plug_hw_veb(self, instance, vif): @@ -672,7 +672,7 @@ class LibvirtGenericVIFDriver(object): nova.privsep.linux_net.create_tap_dev(dev) nova.privsep.libvirt.plug_midonet_vif(port_id, dev) except processutils.ProcessExecutionError: - LOG.exception(_("Failed while plugging vif"), instance=instance) + LOG.exception("Failed while plugging vif", instance=instance) def plug_iovisor(self, instance, vif): """Plug using PLUMgrid IO Visor Driver @@ -689,7 +689,7 @@ class LibvirtGenericVIFDriver(object): nova.privsep.libvirt.plug_plumgrid_vif( dev, iface_id, vif['address'], net_id, tenant_id) except processutils.ProcessExecutionError: - LOG.exception(_("Failed while plugging vif"), instance=instance) + LOG.exception("Failed while plugging vif", instance=instance) def plug_tap(self, instance, vif): """Plug a VIF_TYPE_TAP virtual interface.""" @@ -764,7 +764,7 @@ class LibvirtGenericVIFDriver(object): try: nova.privsep.libvirt.unplug_infiniband_vif(fabric, vnic_mac) except Exception: - LOG.exception(_("Failed while unplugging ib hostdev vif")) + LOG.exception("Failed while unplugging ib hostdev vif") def unplug_hw_veb(self, instance, vif): # TODO(sean-k-mooney): remove in Train after backporting 0 mac @@ 
-795,7 +795,7 @@ class LibvirtGenericVIFDriver(object): nova.privsep.libvirt.unplug_midonet_vif(port_id) nova.privsep.linux_net.delete_net_dev(dev) except processutils.ProcessExecutionError: - LOG.exception(_("Failed while unplugging vif"), instance=instance) + LOG.exception("Failed while unplugging vif", instance=instance) def unplug_tap(self, instance, vif): """Unplug a VIF_TYPE_TAP virtual interface.""" @@ -803,7 +803,7 @@ class LibvirtGenericVIFDriver(object): try: nova.privsep.linux_net.delete_net_dev(dev) except processutils.ProcessExecutionError: - LOG.exception(_("Failed while unplugging vif"), instance=instance) + LOG.exception("Failed while unplugging vif", instance=instance) def unplug_iovisor(self, instance, vif): """Unplug using PLUMgrid IO Visor Driver @@ -816,7 +816,7 @@ class LibvirtGenericVIFDriver(object): nova.privsep.libvirt.unplug_plumgrid_vif(dev) nova.privsep.linux_net.delete_net_dev(dev) except processutils.ProcessExecutionError: - LOG.exception(_("Failed while unplugging vif"), instance=instance) + LOG.exception("Failed while unplugging vif", instance=instance) def _unplug_os_vif(self, instance, vif): instance_info = os_vif_util.nova_to_osvif_instance(instance) diff --git a/nova/virt/libvirt/volume/mount.py b/nova/virt/libvirt/volume/mount.py index a02fe460a385..95c4a671845c 100644 --- a/nova/virt/libvirt/volume/mount.py +++ b/nova/virt/libvirt/volume/mount.py @@ -24,7 +24,6 @@ import six import nova.conf from nova import exception -from nova.i18n import _ import nova.privsep.fs import nova.privsep.path @@ -312,13 +311,12 @@ class _HostMountState(object): # We're not going to raise the exception because we're # in the desired state anyway. However, this is still # unusual so we'll log it. - LOG.exception(_('Error mounting %(fstype)s export ' - '%(export)s on %(mountpoint)s. 
' - 'Continuing because mountpount is ' - 'mounted despite this.'), - {'fstype': fstype, 'export': export, - 'mountpoint': mountpoint}) - + LOG.exception( + 'Error mounting %(fstype)s export %(export)s on ' + '%(mountpoint)s. Continuing because mountpount is ' + 'mounted despite this.', + {'fstype': fstype, 'export': export, + 'mountpoint': mountpoint}) else: # If the mount failed there's no reason for us to keep # a record of it. It will be created again if the diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py index b2a474e5f751..5355633b83d8 100644 --- a/nova/virt/libvirt/volume/quobyte.py +++ b/nova/virt/libvirt/volume/quobyte.py @@ -100,7 +100,7 @@ def umount_volume(mnt_base): if 'Device or resource busy' in six.text_type(exc): LOG.error("The Quobyte volume at %s is still in use.", mnt_base) else: - LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"), + LOG.exception("Couldn't unmount the Quobyte Volume at %s", mnt_base) diff --git a/nova/virt/libvirt/volume/remotefs.py b/nova/virt/libvirt/volume/remotefs.py index 6fc2cb414309..19cb1b43fe18 100644 --- a/nova/virt/libvirt/volume/remotefs.py +++ b/nova/virt/libvirt/volume/remotefs.py @@ -23,7 +23,6 @@ from oslo_utils import importutils import six import nova.conf -from nova.i18n import _ import nova.privsep.fs from nova import utils @@ -64,7 +63,7 @@ def unmount_share(mount_path, export_path): if 'target is busy' in six.text_type(exc): LOG.debug("The share %s is still in use.", export_path) else: - LOG.exception(_("Couldn't unmount the share %s"), export_path) + LOG.exception("Couldn't unmount the share %s", export_path) class RemoteFilesystem(object): diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index a87d47d9cda2..eb0261363c9e 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -1390,7 +1390,7 @@ def destroy_vm(session, instance, vm_ref=None): session._wait_for_task(destroy_task) LOG.info("Destroyed the 
VM", instance=instance) except Exception: - LOG.exception(_('Destroy VM failed'), instance=instance) + LOG.exception('Destroy VM failed', instance=instance) def create_virtual_disk(session, dc_ref, adapter_type, disk_type, diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 7428f2b0d274..f030c1a3ac9f 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -1125,7 +1125,7 @@ class VMwareVMOps(object): LOG.warning('Instance does not exist on backend', instance=instance) except Exception: - LOG.exception(_('Destroy instance failed'), instance=instance) + LOG.exception('Destroy instance failed', instance=instance) finally: vm_util.vm_ref_cache_delete(instance.uuid) diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 5b9cde85dec5..d996831d07a8 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -48,10 +48,10 @@ CONF = nova.conf.CONF def invalid_option(option_name, recommended_value): - LOG.exception(_('Current value of ' - 'CONF.xenserver.%(option)s option incompatible with ' - 'CONF.xenserver.independent_compute=True. ' - 'Consider using "%(recommended)s"'), + LOG.exception('Current value of ' + 'CONF.xenserver.%(option)s option incompatible with ' + 'CONF.xenserver.independent_compute=True. ' + 'Consider using "%(recommended)s"', {'option': option_name, 'recommended': recommended_value}) raise exception.NotSupportedWithOption( @@ -139,7 +139,7 @@ class XenAPIDriver(driver.ComputeDriver): try: vm_utils.cleanup_attached_vdis(self._session) except Exception: - LOG.exception(_('Failure while cleaning up attached VDIs')) + LOG.exception('Failure while cleaning up attached VDIs') def instance_exists(self, instance): """Checks existence of an instance on the host. 
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py index cf140a5fe6dd..7bd5d6fa937b 100644 --- a/nova/virt/xenapi/host.py +++ b/nova/virt/xenapi/host.py @@ -105,9 +105,10 @@ class Host(object): break except XenAPI.Failure: - LOG.exception(_('Unable to migrate VM %(vm_ref)s ' - 'from %(host)s'), - {'vm_ref': vm_ref, 'host': host}) + LOG.exception( + 'Unable to migrate VM %(vm_ref)s from %(host)s', + {'vm_ref': vm_ref, 'host': host}, + ) instance.host = host instance.vm_state = vm_states.ACTIVE instance.save() @@ -371,7 +372,7 @@ class HostState(object): allocated += vdi_physical physical_used += vdi_physical except (ValueError, self._session.XenAPI.Failure): - LOG.exception(_('Unable to get size for vdi %s'), vdi_ref) + LOG.exception('Unable to get size for vdi %s', vdi_ref) return (allocated, physical_used) @@ -511,7 +512,7 @@ def call_xenhost(session, method, arg_dict): return '' return jsonutils.loads(result) except ValueError: - LOG.exception(_("Unable to get updated status")) + LOG.exception("Unable to get updated status") return None except session.XenAPI.Failure as e: LOG.error("The call to %(method)s returned " @@ -531,7 +532,7 @@ def _call_host_management(session, method, *args): return '' return jsonutils.loads(result) except ValueError: - LOG.exception(_("Unable to get updated status")) + LOG.exception("Unable to get updated status") return None except session.XenAPI.Failure as e: LOG.error("The call to %(method)s returned an error: %(e)s.", diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index 5487d4fb8f79..1265ec9975fb 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -54,8 +54,8 @@ class ResourcePool(object): aggregate.update_metadata(metadata) op(host) except Exception: - LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable ' - 'state during operation on %(host)s'), + LOG.exception('Aggregate %(aggregate_id)s: unrecoverable ' + 'state during operation on %(host)s', {'aggregate_id': 
aggregate.id, 'host': host}) def add_to_aggregate(self, context, aggregate, host, slave_info=None): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 842657d06324..79594af5a913 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -264,7 +264,7 @@ def destroy_vm(session, instance, vm_ref): try: session.VM.destroy(vm_ref) except session.XenAPI.Failure: - LOG.exception(_('Destroy VM failed')) + LOG.exception('Destroy VM failed') return LOG.debug("VM destroyed", instance=instance) @@ -280,7 +280,7 @@ def clean_shutdown_vm(session, instance, vm_ref): try: session.call_xenapi('VM.clean_shutdown', vm_ref) except session.XenAPI.Failure: - LOG.exception(_('Shutting down VM (cleanly) failed.')) + LOG.exception('Shutting down VM (cleanly) failed.') return False return True @@ -295,7 +295,7 @@ def hard_shutdown_vm(session, instance, vm_ref): try: session.call_xenapi('VM.hard_shutdown', vm_ref) except session.XenAPI.Failure: - LOG.exception(_('Shutting down VM (hard) failed')) + LOG.exception('Shutting down VM (hard) failed') return False return True @@ -362,14 +362,14 @@ def unplug_vbd(session, vbd_ref, this_vm_ref): {'vbd_ref': vbd_ref, 'num_attempt': num_attempt, 'max_attempts': max_attempts, 'err': err}) else: - LOG.exception(_('Unable to unplug VBD')) + LOG.exception('Unable to unplug VBD') raise exception.StorageError( - reason=_('Unable to unplug VBD %s') % vbd_ref) + reason=_('Unable to unplug VBD %s') % vbd_ref) raise exception.StorageError( - reason=_('Reached maximum number of retries ' - 'trying to unplug VBD %s') - % vbd_ref) + reason=_('Reached maximum number of retries ' + 'trying to unplug VBD %s') + % vbd_ref) def destroy_vbd(session, vbd_ref): @@ -377,9 +377,9 @@ def destroy_vbd(session, vbd_ref): try: session.call_xenapi('VBD.destroy', vbd_ref) except session.XenAPI.Failure: - LOG.exception(_('Unable to destroy VBD')) + LOG.exception('Unable to destroy VBD') raise exception.StorageError( - 
reason=_('Unable to destroy VBD %s') % vbd_ref) + reason=_('Unable to destroy VBD %s') % vbd_ref) def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk', @@ -1534,7 +1534,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id, return {vdi_role: dict(uuid=vdi_uuid, file=None)} except (session.XenAPI.Failure, IOError, OSError) as e: # We look for XenAPI and OS failures. - LOG.exception(_("Failed to fetch glance image"), instance=instance) + LOG.exception("Failed to fetch glance image", instance=instance) e.args = e.args + ([dict(type=ImageType.to_string(image_type), uuid=vdi_uuid, file=filename)],) @@ -1629,7 +1629,7 @@ def lookup_vm_vdis(session, vm_ref): # This is not an attached volume vdi_refs.append(vdi_ref) except session.XenAPI.Failure: - LOG.exception(_('"Look for the VDIs failed')) + LOG.exception('Look for the VDIs failed') return vdi_refs @@ -1809,7 +1809,7 @@ def compile_diagnostics(vm_rec): return diags except expat.ExpatError as e: - LOG.exception(_('Unable to parse rrd of %s'), e) + LOG.exception('Unable to parse rrd of %s', e) return {"Unable to retrieve diagnostics": e} @@ -1959,8 +1959,8 @@ def _get_rrd(server, vm_uuid): vm_uuid)) return xml.read() except IOError: - LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with ' - 'server details: %(server)s.'), + LOG.exception('Unable to obtain RRD XML for VM %(vm_uuid)s with ' + 'server details: %(server)s.', {'vm_uuid': vm_uuid, 'server': server}) return None diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 163db947c51a..756ac2ec0d8e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1128,8 +1128,8 @@ class VMOps(object): undo_mgr, old_vdi_ref) transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid) except Exception as error: - LOG.exception(_("_migrate_disk_resizing_down failed. Restoring " - "orig vm"), instance=instance) + LOG.exception("_migrate_disk_resizing_down failed. 
Restoring " + "orig vm", instance=instance) undo_mgr._rollback() raise exception.InstanceFaultRollback(error) @@ -1311,16 +1311,16 @@ class VMOps(object): transfer_ephemeral_disks_then_all_leaf_vdis() except Exception as error: - LOG.exception(_("_migrate_disk_resizing_up failed. " - "Restoring orig vm due_to: %s."), - error, instance=instance) + LOG.exception( + "_migrate_disk_resizing_up failed; restoring orig vm due_to: " + "%s.", error, instance=instance) try: self._restore_orig_vm_and_cleanup_orphan(instance) # TODO(johngarbutt) should also cleanup VHDs at destination except Exception as rollback_error: - LOG.warning("_migrate_disk_resizing_up failed to " - "rollback: %s", rollback_error, - instance=instance) + LOG.warning( + "_migrate_disk_resizing_up failed to rollback: %s", + rollback_error, instance=instance) raise exception.InstanceFaultRollback(error) def _apply_orig_vm_name_label(self, instance, vm_ref): @@ -1708,7 +1708,7 @@ class VMOps(object): sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid) except Exception: - LOG.exception(_('Failed to find an SR for volume %s'), + LOG.exception('Failed to find an SR for volume %s', volume_id, instance=instance) try: @@ -1719,7 +1719,7 @@ class VMOps(object): 'instance but no SR was found for it', volume_id, instance=instance) except Exception: - LOG.exception(_('Failed to forget the SR for volume %s'), + LOG.exception('Failed to forget the SR for volume %s', volume_id, instance=instance) return @@ -1926,7 +1926,7 @@ class VMOps(object): raw_console_data = vm_management.get_console_log( self._session, dom_id) except self._session.XenAPI.Failure: - LOG.exception(_("Guest does not have a console available")) + LOG.exception("Guest does not have a console available") raise exception.ConsoleNotAvailable() return zlib.decompress(base64.b64decode(raw_console_data)) @@ -2261,7 +2261,7 @@ class VMOps(object): nwref, options) except self._session.XenAPI.Failure: - LOG.exception(_('Migrate Receive failed')) + 
LOG.exception('Migrate Receive failed') msg = _('Migrate Receive failed') raise exception.MigrationPreCheckError(reason=msg) return migrate_data @@ -2564,7 +2564,7 @@ class VMOps(object): self._call_live_migrate_command( "VM.migrate_send", vm_ref, migrate_data) except self._session.XenAPI.Failure: - LOG.exception(_('Migrate Send failed')) + LOG.exception('Migrate Send failed') raise exception.MigrationError( reason=_('Migrate Send failed')) @@ -2574,9 +2574,11 @@ class VMOps(object): else: host_ref = self._get_host_opaque_ref(destination_hostname) if not host_ref: - LOG.exception(_("Destination host %s was not found in the" - " same shared storage pool as source " - "host."), destination_hostname) + LOG.exception( + "Destination host %s was not found in the same shared " + "storage pool as source host.", + destination_hostname + ) raise exception.MigrationError( reason=_('No host with name %s found') % destination_hostname) @@ -2626,7 +2628,7 @@ class VMOps(object): if sr_ref: volume_utils.forget_sr(self._session, sr_ref) except Exception: - LOG.exception(_('Failed to forget the SR for volume %s'), + LOG.exception('Failed to forget the SR for volume %s', params['id'], instance=instance) # delete VIF and network in destination host @@ -2640,8 +2642,10 @@ class VMOps(object): try: self.vif_driver.delete_network_and_bridge(instance, vif['id']) except Exception: - LOG.exception(_('Failed to delete networks and bridges with ' - 'VIF %s'), vif['id'], instance=instance) + LOG.exception( + 'Failed to delete networks and bridges with VIF %s', + vif['id'], instance=instance, + ) def get_per_instance_usage(self): """Get usage info about each active instance.""" @@ -2703,7 +2707,7 @@ class VMOps(object): device=device) except exception.NovaException: with excutils.save_and_reraise_exception(): - LOG.exception(_('attach network interface %s failed.'), + LOG.exception('attach network interface %s failed.', vif['id'], instance=instance) try: self.vif_driver.unplug(instance, vif, 
vm_ref) @@ -2726,5 +2730,5 @@ class VMOps(object): raise except exception.NovaException: with excutils.save_and_reraise_exception(): - LOG.exception(_('detach network interface %s failed.'), + LOG.exception('detach network interface %s failed.', vif['id'], instance=instance) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 3b706b5cc0a6..9b9eb44bf111 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -175,7 +175,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None): session.call_xenapi("SR.scan", sr_ref) vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun) except session.XenAPI.Failure: - LOG.exception(_('Unable to introduce VDI on SR')) + LOG.exception('Unable to introduce VDI on SR') raise exception.StorageError( reason=_('Unable to introduce VDI on SR %s') % sr_ref) @@ -190,7 +190,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None): vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) LOG.debug(vdi_rec) except session.XenAPI.Failure: - LOG.exception(_('Unable to get record of VDI')) + LOG.exception('Unable to get record of VDI') raise exception.StorageError( reason=_('Unable to get record of VDI %s on') % vdi_ref) @@ -212,7 +212,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None): vdi_rec['xenstore_data'], vdi_rec['sm_config']) except session.XenAPI.Failure: - LOG.exception(_('Unable to introduce VDI for SR')) + LOG.exception('Unable to introduce VDI for SR') raise exception.StorageError( reason=_('Unable to introduce VDI for SR %s') % sr_ref) @@ -310,7 +310,7 @@ def find_sr_from_vbd(session, vbd_ref): vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref) except session.XenAPI.Failure: - LOG.exception(_('Unable to find SR from VBD')) + LOG.exception('Unable to find SR from VBD') raise exception.StorageError( reason=_('Unable to find SR from VBD %s') % vbd_ref) return 
sr_ref @@ -321,9 +321,9 @@ def find_sr_from_vdi(session, vdi_ref): try: sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref) except session.XenAPI.Failure: - LOG.exception(_('Unable to find SR from VDI')) + LOG.exception('Unable to find SR from VDI') raise exception.StorageError( - reason=_('Unable to find SR from VDI %s') % vdi_ref) + reason=_('Unable to find SR from VDI %s') % vdi_ref) return sr_ref diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 95a131e1900f..5bb56bf61019 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -800,8 +800,8 @@ class API(object): # NOTE: It is unnecessary to output BadRequest(400) error log, # because operators don't need to debug such cases. if getattr(ex, 'code', None) != 400: - LOG.error(('Create attachment failed for volume ' - '%(volume_id)s. Error: %(msg)s Code: %(code)s'), + LOG.error('Create attachment failed for volume ' + '%(volume_id)s. Error: %(msg)s Code: %(code)s', {'volume_id': volume_id, 'msg': six.text_type(ex), 'code': getattr(ex, 'code', None)}, @@ -826,8 +826,8 @@ class API(object): return translated_attach_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): - LOG.error(('Show attachment failed for attachment ' - '%(id)s. Error: %(msg)s Code: %(code)s'), + LOG.error('Show attachment failed for attachment ' + '%(id)s. Error: %(msg)s Code: %(code)s', {'id': attachment_id, 'msg': six.text_type(ex), 'code': getattr(ex, 'code', None)}) @@ -873,8 +873,8 @@ class API(object): return translated_attach_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): - LOG.error(('Update attachment failed for attachment ' - '%(id)s. Error: %(msg)s Code: %(code)s'), + LOG.error('Update attachment failed for attachment ' + '%(id)s. 
Error: %(msg)s Code: %(code)s', {'id': attachment_id, 'msg': six.text_type(ex), 'code': getattr(ex, 'code', None)}) @@ -890,8 +890,8 @@ class API(object): attachment_id) except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): - LOG.error(('Delete attachment failed for attachment ' - '%(id)s. Error: %(msg)s Code: %(code)s'), + LOG.error('Delete attachment failed for attachment ' + '%(id)s. Error: %(msg)s Code: %(code)s', {'id': attachment_id, 'msg': six.text_type(ex), 'code': getattr(ex, 'code', None)}) @@ -913,8 +913,8 @@ class API(object): attachment_id) except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): - LOG.error(('Complete attachment failed for attachment ' - '%(id)s. Error: %(msg)s Code: %(code)s'), + LOG.error('Complete attachment failed for attachment ' + '%(id)s. Error: %(msg)s Code: %(code)s', {'id': attachment_id, 'msg': six.text_type(ex), 'code': getattr(ex, 'code', None)})