Remove translation of log messages

The i18n team has decided not to translate log messages because it
does not seem very useful; operators prefer to have them in English
so that they can search for those strings on the internet.

This partially fixes nova/virt; other paths will be fixed in subsequent commits.
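
As a minimal sketch of the pattern applied throughout (the variable
names below are placeholders, not taken from any one hunk): the lazy
translation markers are dropped and the bare string is passed to the
logger, while _() is kept only for user-facing exception messages.

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    # Before: message wrapped in a lazy-translation marker
    #   from nova.i18n import _LI
    #   LOG.info(_LI('Booting with volume %(volume_id)s at %(mountpoint)s'),
    #            {'volume_id': volume_id, 'mountpoint': mount_device})

    # After: the same message passed as a plain string
    volume_id, mount_device = 'vol-1234', '/dev/vdb'  # placeholder values
    LOG.info('Booting with volume %(volume_id)s at %(mountpoint)s',
             {'volume_id': volume_id, 'mountpoint': mount_device})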

Change-Id: Ie7821aa4a5147cdb0616741bd1a1b1fc22080440
Ngo Quoc Cuong 2017-05-23 10:18:45 +07:00
parent 3a5d592e60
commit 6c3520ac5b
61 changed files with 868 additions and 1001 deletions

View File

@ -24,9 +24,6 @@ from oslo_utils import excutils
from nova import block_device
import nova.conf
from nova import exception
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
CONF = nova.conf.CONF
@ -258,7 +255,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
if 'multipath_id' in self['connection_info']['data']:
connection_info['data']['multipath_id'] =\
self['connection_info']['data']['multipath_id']
LOG.info(_LI('preserve multipath_id %s'),
LOG.info('preserve multipath_id %s',
connection_info['data']['multipath_id'])
def driver_detach(self, context, instance, volume_api, virt_driver):
@ -266,12 +263,12 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
mp = self['mount_device']
volume_id = self.volume_id
LOG.info(_LI('Attempting to driver detach volume %(volume_id)s from '
'mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp},
LOG.info('Attempting to driver detach volume %(volume_id)s from '
'mountpoint %(mp)s', {'volume_id': volume_id, 'mp': mp},
instance=instance)
try:
if not virt_driver.instance_exists(instance):
LOG.warning(_LW('Detaching volume from unknown instance'),
LOG.warning('Detaching volume from unknown instance',
instance=instance)
encryption = encryptors.get_encryption_metadata(context,
@ -279,15 +276,15 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
virt_driver.detach_volume(connection_info, instance, mp,
encryption=encryption)
except exception.DiskNotFound as err:
LOG.warning(_LW('Ignoring DiskNotFound exception while '
'detaching volume %(volume_id)s from '
'%(mp)s : %(err)s'),
LOG.warning('Ignoring DiskNotFound exception while '
'detaching volume %(volume_id)s from '
'%(mp)s : %(err)s',
{'volume_id': volume_id, 'mp': mp,
'err': err}, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to detach volume '
'%(volume_id)s from %(mp)s'),
LOG.exception('Failed to detach volume '
'%(volume_id)s from %(mp)s',
{'volume_id': volume_id, 'mp': mp},
instance=instance)
volume_api.roll_detaching(context, volume_id)
@ -307,9 +304,9 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
LOG.debug("Skipping driver_detach during remote rebuild.",
instance=instance)
elif destroy_bdm:
LOG.error(_LE("Unable to call for a driver detach of volume "
"%(vol_id)s due to the instance being "
"registered to the remote host %(inst_host)s."),
LOG.error("Unable to call for a driver detach of volume "
"%(vol_id)s due to the instance being "
"registered to the remote host %(inst_host)s.",
{'vol_id': volume_id,
'inst_host': instance.host}, instance=instance)
@ -322,20 +319,20 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
stashed_connector = connection_info.get('connector')
if not stashed_connector:
# Volume was attached before we began stashing connectors
LOG.warning(_LW("Host mismatch detected, but stashed "
"volume connector not found. Instance host is "
"%(ihost)s, but volume connector host is "
"%(chost)s."),
LOG.warning("Host mismatch detected, but stashed "
"volume connector not found. Instance host is "
"%(ihost)s, but volume connector host is "
"%(chost)s.",
{'ihost': instance.host,
'chost': connector.get('host')})
elif stashed_connector.get('host') != instance.host:
# Unexpected error. The stashed connector is also not matching
# the needed instance host.
LOG.error(_LE("Host mismatch detected in stashed volume "
"connector. Will use local volume connector. "
"Instance host is %(ihost)s. Local volume "
"connector host is %(chost)s. Stashed volume "
"connector host is %(schost)s."),
LOG.error("Host mismatch detected in stashed volume "
"connector. Will use local volume connector. "
"Instance host is %(ihost)s. Local volume "
"connector host is %(chost)s. Stashed volume "
"connector host is %(schost)s.",
{'ihost': instance.host,
'chost': connector.get('host'),
'schost': stashed_connector.get('host')})
@ -392,8 +389,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
device_type=self['device_type'], encryption=encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Driver failed to attach volume "
"%(volume_id)s at %(mountpoint)s"),
LOG.exception("Driver failed to attach volume "
"%(volume_id)s at %(mountpoint)s",
{'volume_id': volume_id,
'mountpoint': self['mount_device']},
instance=instance)
@ -424,11 +421,11 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
self['mount_device'],
encryption=encryption)
except Exception:
LOG.warning(_LW("Driver failed to detach volume "
"%(volume_id)s at %(mount_point)s."),
{'volume_id': volume_id,
'mount_point': self['mount_device']},
exc_info=True, instance=instance)
LOG.warning("Driver failed to detach volume "
"%(volume_id)s at %(mount_point)s.",
{'volume_id': volume_id,
'mount_point': self['mount_device']},
exc_info=True, instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
@ -475,8 +472,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
volume_api.delete(context, volume_id)
except Exception as exc:
LOG.warning(
_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
'Failed to delete volume: %(volume_id)s '
'due to %(exc)s',
{'volume_id': volume_id, 'exc': exc})
@ -600,25 +597,25 @@ def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
instance = attach_args[1]
if bdm.get('volume_id'):
LOG.info(_LI('Booting with volume %(volume_id)s at '
'%(mountpoint)s'),
LOG.info('Booting with volume %(volume_id)s at '
'%(mountpoint)s',
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
instance=instance)
elif bdm.get('snapshot_id'):
LOG.info(_LI('Booting with volume snapshot %(snapshot_id)s at '
'%(mountpoint)s'),
LOG.info('Booting with volume snapshot %(snapshot_id)s at '
'%(mountpoint)s',
{'snapshot_id': bdm.snapshot_id,
'mountpoint': bdm['mount_device']},
instance=instance)
elif bdm.get('image_id'):
LOG.info(_LI('Booting with volume-backed-image %(image_id)s at '
'%(mountpoint)s'),
LOG.info('Booting with volume-backed-image %(image_id)s at '
'%(mountpoint)s',
{'image_id': bdm.image_id,
'mountpoint': bdm['mount_device']},
instance=instance)
else:
LOG.info(_LI('Booting with blank volume at %(mountpoint)s'),
LOG.info('Booting with blank volume at %(mountpoint)s',
{'mountpoint': bdm['mount_device']},
instance=instance)

View File

@ -38,8 +38,6 @@ from oslo_utils import units
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils
from nova.virt.disk.mount import api as mount
from nova.virt.disk.vfs import api as vfs
@ -243,8 +241,8 @@ def is_image_extendable(image):
# provides a bad configuration for libguestfs reported in
# the bug lp#1413142. When resolved we should remove this
# except to let the error to be propagated.
LOG.warning(_LW('Unable to mount image %(image)s with '
'error %(error)s. Cannot resize.'),
LOG.warning('Unable to mount image %(image)s with '
'error %(error)s. Cannot resize.',
{'image': image, 'error': e})
finally:
if fs is not None:
@ -407,8 +405,8 @@ def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
inject_val = items[inject]
if inject_val:
raise
LOG.warning(_LW('Ignoring error injecting data into image %(image)s '
'(%(e)s)'), {'image': image, 'e': e})
LOG.warning('Ignoring error injecting data into image %(image)s '
'(%(e)s)', {'image': image, 'e': e})
return False
try:
@ -432,8 +430,8 @@ def setup_container(image, container_dir):
img = _DiskImage(image=image, mount_dir=container_dir)
dev = img.mount()
if dev is None:
LOG.error(_LE("Failed to mount container filesystem '%(image)s' "
"on '%(target)s': %(errors)s"),
LOG.error("Failed to mount container filesystem '%(image)s' "
"on '%(target)s': %(errors)s",
{"image": img, "target": container_dir,
"errors": img.errors})
raise exception.NovaException(img.errors)
@ -465,7 +463,7 @@ def teardown_container(container_dir, container_root_device=None):
LOG.debug('No release necessary for block device %s',
container_root_device)
except Exception:
LOG.exception(_LE('Failed to teardown container filesystem'))
LOG.exception(_('Failed to teardown container filesystem'))
def clean_lxc_namespace(container_dir):
@ -478,7 +476,7 @@ def clean_lxc_namespace(container_dir):
img = _DiskImage(image=None, mount_dir=container_dir)
img.umount()
except Exception:
LOG.exception(_LE('Failed to umount container filesystem'))
LOG.exception(_('Failed to umount container filesystem'))
def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
@ -511,8 +509,8 @@ def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
except Exception as e:
if inject in mandatory:
raise
LOG.warning(_LW('Ignoring error injecting %(inject)s into '
'image (%(e)s)'), {'inject': inject, 'e': e})
LOG.warning('Ignoring error injecting %(inject)s into '
'image (%(e)s)', {'inject': inject, 'e': e})
status = False
return status

View File

@ -21,7 +21,7 @@ from oslo_service import loopingcall
from oslo_utils import importutils
from nova import exception
from nova.i18n import _, _LI, _LW
from nova.i18n import _
from nova import utils
from nova.virt.image import model as imgmodel
@ -167,11 +167,10 @@ class Mount(object):
start_time = time.time()
device = self._inner_get_dev()
while not device:
LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
LOG.info('Device allocation failed. Will retry in 2 seconds.')
time.sleep(2)
if time.time() - start_time > MAX_DEVICE_WAIT:
LOG.warning(_LW('Device allocation failed after repeated '
'retries.'))
LOG.warning('Device allocation failed after repeated retries.')
return False
device = self._inner_get_dev()
return True

View File

@ -15,7 +15,7 @@
from oslo_log import log as logging
from nova.i18n import _, _LI
from nova.i18n import _
from nova import utils
from nova.virt.disk.mount import api
@ -32,7 +32,7 @@ class LoopMount(api.Mount):
run_as_root=True)
if err:
self.error = _('Could not attach image to loopback: %s') % err
LOG.info(_LI('Loop mount error: %s'), self.error)
LOG.info('Loop mount error: %s', self.error)
self.linked = False
self.device = None
return False

View File

@ -21,7 +21,7 @@ import time
from oslo_log import log as logging
import nova.conf
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova import utils
from nova.virt.disk.mount import api
@ -46,14 +46,14 @@ class NbdMount(api.Mount):
if not os.path.exists('/var/lock/qemu-nbd-%s' % device):
return device
else:
LOG.error(_LE('NBD error - previous umount did not '
'cleanup /var/lock/qemu-nbd-%s.'), device)
LOG.warning(_LW('No free nbd devices'))
LOG.error('NBD error - previous umount did not '
'cleanup /var/lock/qemu-nbd-%s.', device)
LOG.warning('No free nbd devices')
return None
def _allocate_nbd(self):
if not os.path.exists('/sys/block/nbd0'):
LOG.error(_LE('nbd module not loaded'))
LOG.error('nbd module not loaded')
self.error = _('nbd unavailable: module not loaded')
return None
@ -81,7 +81,7 @@ class NbdMount(api.Mount):
run_as_root=True)
if err:
self.error = _('qemu-nbd error: %s') % err
LOG.info(_LI('NBD mount error: %s'), self.error)
LOG.info('NBD mount error: %s', self.error)
return False
# NOTE(vish): this forks into another process, so give it a chance
@ -94,14 +94,14 @@ class NbdMount(api.Mount):
time.sleep(1)
else:
self.error = _('nbd device %s did not show up') % device
LOG.info(_LI('NBD mount error: %s'), self.error)
LOG.info('NBD mount error: %s', self.error)
# Cleanup
_out, err = utils.trycmd('qemu-nbd', '-d', device,
run_as_root=True)
if err:
LOG.warning(_LW('Detaching from erroneous nbd device returned '
'error: %s'), err)
LOG.warning('Detaching from erroneous nbd device returned '
'error: %s', err)
return False
self.error = ''

View File

@ -16,7 +16,6 @@ from oslo_log import log as logging
from oslo_utils import importutils
from nova import exception
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
@ -69,8 +68,8 @@ class VFS(object):
# check for capabilities.
raise
else:
LOG.info(_LI("Unable to import guestfs, "
"falling back to VFSLocalFS"))
LOG.info("Unable to import guestfs, "
"falling back to VFSLocalFS")
return importutils.import_object(
"nova.virt.disk.vfs.localfs.VFSLocalFS",

View File

@ -22,7 +22,6 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.virt.disk.vfs import api as vfs
from nova.virt.image import model as imgmodel
@ -93,8 +92,8 @@ class VFSGuestFS(vfs.VFS):
def configure_debug(self):
"""Configures guestfs to be verbose."""
if not self.handle:
LOG.warning(_LW("Please consider to execute setup before trying "
"to configure debug log message."))
LOG.warning("Please consider to execute setup before trying "
"to configure debug log message.")
else:
def log_callback(ev, eh, buf, array):
if ev == guestfs.EVENT_APPLIANCE:
@ -197,8 +196,8 @@ class VFSGuestFS(vfs.VFS):
except AttributeError as ex:
# set_backend_settings method doesn't exist in older
# libguestfs versions, so nothing we can do but ignore
LOG.warning(_LW("Unable to force TCG mode, "
"libguestfs too old? %s"), ex)
LOG.warning("Unable to force TCG mode, "
"libguestfs too old? %s", ex)
pass
try:
@ -246,7 +245,7 @@ class VFSGuestFS(vfs.VFS):
if self.mount:
self.handle.aug_close()
except RuntimeError as e:
LOG.warning(_LW("Failed to close augeas %s"), e)
LOG.warning("Failed to close augeas %s", e)
try:
self.handle.shutdown()
@ -254,7 +253,7 @@ class VFSGuestFS(vfs.VFS):
# Older libguestfs versions haven't an explicit shutdown
pass
except RuntimeError as e:
LOG.warning(_LW("Failed to shutdown appliance %s"), e)
LOG.warning("Failed to shutdown appliance %s", e)
try:
self.handle.close()
@ -262,7 +261,7 @@ class VFSGuestFS(vfs.VFS):
# Older libguestfs versions haven't an explicit close
pass
except RuntimeError as e:
LOG.warning(_LW("Failed to close guest handle %s"), e)
LOG.warning("Failed to close guest handle %s", e)
finally:
# dereference object and implicitly close()
self.handle = None

View File

@ -27,7 +27,7 @@ from oslo_utils import importutils
import six
import nova.conf
from nova.i18n import _, _LE, _LI
from nova.i18n import _
from nova.virt import event as virtevent
CONF = nova.conf.CONF
@ -1444,7 +1444,7 @@ class ComputeDriver(object):
LOG.debug("Emitting event %s", six.text_type(event))
self._compute_event_callback(event)
except Exception as ex:
LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
LOG.error("Exception dispatching event %(event)s: %(ex)s",
{'event': event, 'ex': ex})
def delete_instance_files(self, instance):
@ -1613,10 +1613,10 @@ def load_compute_driver(virtapi, compute_driver=None):
compute_driver = CONF.compute_driver
if not compute_driver:
LOG.error(_LE("Compute driver option required, but not specified"))
LOG.error("Compute driver option required, but not specified")
sys.exit(1)
LOG.info(_LI("Loading compute driver '%s'"), compute_driver)
LOG.info("Loading compute driver '%s'", compute_driver)
try:
driver = importutils.import_object(
'nova.virt.%s' % compute_driver,
@ -1625,7 +1625,7 @@ def load_compute_driver(virtapi, compute_driver=None):
return driver
raise ValueError()
except ImportError:
LOG.exception(_LE("Unable to load the virtualization driver"))
LOG.exception(_("Unable to load the virtualization driver"))
sys.exit(1)
except ValueError:
LOG.exception("Compute driver '%s' from 'nova.virt' is not of type"

View File

@ -35,7 +35,6 @@ from nova.compute import task_states
import nova.conf
from nova.console import type as ctype
from nova import exception
from nova.i18n import _LW
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields as obj_fields
from nova.virt import driver
@ -274,7 +273,7 @@ class FakeDriver(driver.ComputeDriver):
disk=flavor.root_gb)
del self.instances[key]
else:
LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
LOG.warning("Key '%(key)s' not in instances '%(inst)s'",
{'key': key,
'inst': self.instances}, instance=instance)

View File

@ -21,7 +21,6 @@ from oslo_utils import importutils
from nova.compute import utils as compute_utils
import nova.conf
from nova import context
from nova.i18n import _LI
from nova.network import linux_net
from nova import objects
from nova import utils
@ -137,8 +136,8 @@ class IptablesFirewallDriver(FirewallDriver):
self.remove_filters_for_instance(instance)
self.iptables.apply()
else:
LOG.info(_LI('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
LOG.info('Attempted to unfilter instance which is not filtered',
instance=instance)
def prepare_instance_filter(self, instance, network_info):
self.instance_info[instance.id] = (instance, network_info)
@ -389,10 +388,8 @@ class IptablesFirewallDriver(FirewallDriver):
ipv6_rules):
chain_name = self._instance_chain_name(instance)
if not self.iptables.ipv4['filter'].has_chain(chain_name):
LOG.info(
_LI('instance chain %s disappeared during refresh, '
'skipping'), chain_name,
instance=instance)
LOG.info('instance chain %s disappeared during refresh, skipping',
chain_name, instance=instance)
return
self.remove_filters_for_instance(instance)
self.add_filters_for_instance(instance, network_info, ipv4_rules,

View File

@ -25,7 +25,7 @@ import six
import nova.conf
from nova import context
from nova import exception
from nova.i18n import _, _LI
from nova.i18n import _
from nova import objects
from nova.objects import fields
from nova.objects import instance as obj_instance
@ -784,8 +784,8 @@ def _pack_instance_onto_cores(available_siblings,
# vcpus_pinning=[(2, 0), (3, 4)]
vcpus_pinning = list(zip(sorted(instance_cores),
itertools.chain(*usable_cores)))
msg = _LI("Computed NUMA topology CPU pinning: usable pCPUs: "
"%(usable_cores)s, vCPUs mapping: %(vcpus_pinning)s")
msg = ("Computed NUMA topology CPU pinning: usable pCPUs: "
"%(usable_cores)s, vCPUs mapping: %(vcpus_pinning)s")
msg_args = {
'usable_cores': usable_cores,
'vcpus_pinning': vcpus_pinning,
@ -809,8 +809,8 @@ def _pack_instance_onto_cores(available_siblings,
# cpuset_reserved=[4]
cpuset_reserved = set(list(
itertools.chain(*usable_cores))[:num_cpu_reserved])
msg = _LI("Computed NUMA topology reserved pCPUs: usable pCPUs: "
"%(usable_cores)s, reserved pCPUs: %(cpuset_reserved)s")
msg = ("Computed NUMA topology reserved pCPUs: usable pCPUs: "
"%(usable_cores)s, reserved pCPUs: %(cpuset_reserved)s")
msg_args = {
'usable_cores': usable_cores,
'cpuset_reserved': cpuset_reserved,
@ -943,9 +943,9 @@ def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell,
else:
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.REQUIRE):
LOG.info(_LI("Host does not support hyperthreading or "
"hyperthreading is disabled, but 'require' "
"threads policy was requested."))
LOG.info("Host does not support hyperthreading or "
"hyperthreading is disabled, but 'require' "
"threads policy was requested.")
return
# Straightforward to pin to available cpus when there is no

View File

@ -27,7 +27,6 @@ from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LE
from nova.virt import driver
from nova.virt.hyperv import eventhandler
from nova.virt.hyperv import hostops
@ -123,10 +122,10 @@ class HyperVDriver(driver.ComputeDriver):
# the version is of Windows is older than Windows Server 2012 R2.
# Log an error, letting users know that this version is not
# supported any longer.
LOG.error(_LE('You are running nova-compute on an unsupported '
'version of Windows (older than Windows / Hyper-V '
'Server 2012). The support for this version of '
'Windows has been removed in Mitaka.'))
LOG.error('You are running nova-compute on an unsupported '
'version of Windows (older than Windows / Hyper-V '
'Server 2012). The support for this version of '
'Windows has been removed in Mitaka.')
raise exception.HypervisorTooOld(version='6.2')
@property

View File

@ -19,7 +19,6 @@ from os_win import utilsfactory
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova import utils
from nova.virt import event as virtevent
from nova.virt.hyperv import serialconsoleops
@ -83,10 +82,9 @@ class InstanceEventHandler(object):
try:
instance_uuid = self._vmutils.get_instance_uuid(instance_name)
if not instance_uuid:
LOG.warning(_LW("Instance uuid could not be retrieved for "
"instance %s. Instance state change event "
"will be ignored."),
instance_name)
LOG.warning("Instance uuid could not be retrieved for "
"instance %s. Instance state change event "
"will be ignored.", instance_name)
return instance_uuid
except os_win_exc.HyperVVMNotFoundException:
# The instance has been deleted.

View File

@ -26,7 +26,7 @@ from oslo_utils import uuidutils
import nova.conf
from nova import exception
from nova.i18n import _, _LI
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import pathutils
from nova.virt import imagecache
@ -201,7 +201,7 @@ class ImageCache(imagecache.ImageCacheManager):
for img in backing_files:
age_seconds = self._pathutils.get_age_of_file(img)
if age_seconds > max_age_seconds:
LOG.info(_LI("Removing old, unused image: %s"), img)
LOG.info("Removing old, unused image: %s", img)
self._remove_old_image(img)
def _remove_old_image(self, image_path):

View File

@ -24,7 +24,7 @@ from oslo_utils import excutils
from oslo_utils import units
from nova import exception
from nova.i18n import _, _LW, _LE
from nova.i18n import _
from nova import objects
from nova.virt import configdrive
from nova.virt.hyperv import block_device_manager
@ -98,7 +98,7 @@ class MigrationOps(object):
except Exception as ex:
# Log and ignore this exception
LOG.exception(ex)
LOG.error(_LE("Cannot cleanup migration files"))
LOG.error("Cannot cleanup migration files")
def _check_target_flavor(self, instance, flavor):
new_root_gb = flavor.root_gb
@ -314,8 +314,8 @@ class MigrationOps(object):
elif sum(eph['size'] for eph in ephemerals) != new_eph_gb:
# New ephemeral size is different from the original ephemeral size
# and there are multiple ephemerals.
LOG.warning(_LW("Cannot resize multiple ephemeral disks for "
"instance."), instance=instance)
LOG.warning("Cannot resize multiple ephemeral disks for instance.",
instance=instance)
for index, eph in enumerate(ephemerals):
eph_name = "eph%s" % index

View File

@ -22,7 +22,7 @@ from oslo_log import log as logging
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import exception
from nova.i18n import _, _LI
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import serialproxy
@ -76,12 +76,12 @@ class SerialConsoleHandler(object):
self._listen_port = serial_console.acquire_port(
self._listen_host)
LOG.info(_LI('Initializing serial proxy on '
'%(addr)s:%(port)s, handling connections '
'to instance %(instance_name)s.'),
{'addr': self._listen_host,
'port': self._listen_port,
'instance_name': self._instance_name})
LOG.info('Initializing serial proxy on '
'%(addr)s:%(port)s, handling connections '
'to instance %(instance_name)s.',
{'addr': self._listen_host,
'port': self._listen_port,
'instance_name': self._instance_name})
# Use this event in order to manage
# pending queue operations.

View File

@ -21,7 +21,6 @@ from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LI, _LE
from nova import utils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import serialconsolehandler
@ -58,8 +57,8 @@ class SerialConsoleOps(object):
handler.start()
_console_handlers[instance_name] = handler
except Exception as exc:
LOG.error(_LE('Instance %(instance_name)s serial console handler '
'could not start. Exception %(exc)s'),
LOG.error('Instance %(instance_name)s serial console handler '
'could not start. Exception %(exc)s',
{'instance_name': instance_name,
'exc': exc})
if handler:
@ -72,8 +71,8 @@ class SerialConsoleOps(object):
def stop_console_handler_unsync(self, instance_name):
handler = _console_handlers.get(instance_name)
if handler:
LOG.info(_LI("Stopping instance %(instance_name)s "
"serial console handler."),
LOG.info("Stopping instance %(instance_name)s "
"serial console handler.",
{'instance_name': instance_name})
handler.stop()
del _console_handlers[instance_name]

View File

@ -22,7 +22,7 @@ from os_win import utilsfactory
from oslo_log import log as logging
from nova.compute import task_states
from nova.i18n import _LE
from nova.i18n import _
from nova.image import glance
from nova.virt.hyperv import pathutils
@ -112,7 +112,7 @@ class SnapshotOps(object):
LOG.debug("Removing snapshot %s", image_id)
self._vmutils.remove_vm_snapshot(snapshot_path)
except Exception:
LOG.exception(_LE('Failed to remove snapshot for VM %s'),
LOG.exception(_('Failed to remove snapshot for VM %s'),
instance_name, instance=instance)
if export_dir:
LOG.debug('Removing directory: %s', export_dir)

View File

@ -38,7 +38,7 @@ from nova.api.metadata import base as instance_metadata
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova.i18n import _
from nova import objects
from nova.objects import fields
from nova import utils
@ -268,7 +268,7 @@ class VMOps(object):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_LI("Spawning new instance"), instance=instance)
LOG.info("Spawning new instance", instance=instance)
instance_name = instance.name
if self._vmutils.vm_exists(instance_name):
@ -319,13 +319,13 @@ class VMOps(object):
yield
except etimeout.Timeout:
# We never heard from Neutron
LOG.warning(_LW('Timeout waiting for vif plugging callback for '
'instance.'), instance=instance)
LOG.warning('Timeout waiting for vif plugging callback for '
'instance.', instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event %s'),
LOG.error('Neutron Reported failure on event %s',
event_name, instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
@ -357,10 +357,10 @@ class VMOps(object):
"has to be disabled in order for the instance to "
"benefit from it.", instance=instance)
if CONF.hyperv.dynamic_memory_ratio > 1.0:
LOG.warning(_LW(
LOG.warning(
"Instance vNUMA topology requested, but dynamic memory "
"ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
"memory ratio option."), instance=instance)
"memory ratio option.", instance=instance)
dynamic_memory_ratio = 1.0
vnuma_enabled = True
else:
@ -549,8 +549,8 @@ class VMOps(object):
image_prop_vm = image_meta.properties.get('hw_machine_type',
default_vm_gen)
if image_prop_vm not in self._hostutils.get_supported_vm_types():
reason = _LE('Requested VM Generation %s is not supported on '
'this OS.') % image_prop_vm
reason = _('Requested VM Generation %s is not supported on '
'this OS.') % image_prop_vm
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
@ -560,8 +560,8 @@ class VMOps(object):
if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
self._vhdutils.get_vhd_format(
root_vhd_path) == constants.DISK_FORMAT_VHD):
reason = _LE('Requested VM Generation %s, but provided VHD '
'instead of VHDX.') % vm_gen
reason = _('Requested VM Generation %s, but provided VHD '
'instead of VHDX.') % vm_gen
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
@ -628,7 +628,7 @@ class VMOps(object):
raise exception.ConfigDriveUnsupportedFormat(
format=CONF.config_drive_format)
LOG.info(_LI('Using config drive for instance'), instance=instance)
LOG.info('Using config drive for instance', instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
@ -640,7 +640,7 @@ class VMOps(object):
configdrive_path_iso = self._pathutils.get_configdrive_path(
instance.name, constants.DVD_FORMAT, rescue=rescue)
LOG.info(_LI('Creating config drive at %(path)s'),
LOG.info('Creating config drive at %(path)s',
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
@ -648,9 +648,8 @@ class VMOps(object):
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with '
'error: %s'),
e, instance=instance)
LOG.error('Creating config drive failed with '
'error: %s', e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
configdrive_path = self._pathutils.get_configdrive_path(
@ -701,7 +700,7 @@ class VMOps(object):
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance.name
LOG.info(_LI("Got request to destroy instance"), instance=instance)
LOG.info("Got request to destroy instance", instance=instance)
try:
if self._vmutils.vm_exists(instance_name):
@ -718,7 +717,7 @@ class VMOps(object):
self._delete_disk_files(instance_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to destroy instance: %s'),
LOG.exception(_('Failed to destroy instance: %s'),
instance_name)
def reboot(self, instance, network_info, reboot_type):
@ -754,7 +753,7 @@ class VMOps(object):
timeout, instance=instance)
self._vmutils.soft_shutdown_vm(instance.name)
if self._wait_for_power_off(instance.name, wait_time):
LOG.info(_LI("Soft shutdown succeeded."),
LOG.info("Soft shutdown succeeded.",
instance=instance)
return True
except os_win_exc.HyperVException as e:
@ -765,7 +764,7 @@ class VMOps(object):
timeout -= retry_interval
LOG.warning(_LW("Timed out while waiting for soft shutdown."),
LOG.warning("Timed out while waiting for soft shutdown.",
instance=instance)
return False
@ -842,8 +841,8 @@ class VMOps(object):
'req_state': req_state})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to change vm state of %(instance_name)s"
" to %(req_state)s"),
LOG.error("Failed to change vm state of %(instance_name)s"
" to %(req_state)s",
{'instance_name': instance_name,
'req_state': req_state})
@ -966,9 +965,9 @@ class VMOps(object):
image_meta, rescue_password)
except Exception as exc:
with excutils.save_and_reraise_exception():
err_msg = _LE("Instance rescue failed. Exception: %(exc)s. "
"Attempting to unrescue the instance.")
LOG.error(err_msg, {'exc': exc}, instance=instance)
LOG.error("Instance rescue failed. Exception: %(exc)s. "
"Attempting to unrescue the instance.",
{'exc': exc}, instance=instance)
self.unrescue_instance(instance)
def _rescue_instance(self, context, instance, network_info, image_meta,

View File

@ -26,7 +26,7 @@ from oslo_utils import strutils
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova import utils
from nova.virt import driver
from nova.virt.hyperv import constants
@ -77,10 +77,10 @@ class VolumeOps(object):
tries_left -= 1
if not tries_left:
LOG.exception(
_LE("Failed to attach volume %(connection_info)s "
"to instance %(instance_name)s. "),
{'connection_info': strutils.mask_dict_password(
connection_info),
_("Failed to attach volume %(connection_info)s "
"to instance %(instance_name)s. "),
{'connection_info':
strutils.mask_dict_password(connection_info),
'instance_name': instance_name})
self.disconnect_volume(connection_info)
@ -89,9 +89,9 @@ class VolumeOps(object):
reason=ex)
else:
LOG.warning(
_LW("Failed to attach volume %(connection_info)s "
"to instance %(instance_name)s. "
"Tries left: %(tries_left)s."),
"Failed to attach volume %(connection_info)s "
"to instance %(instance_name)s. "
"Tries left: %(tries_left)s.",
{'connection_info': strutils.mask_dict_password(
connection_info),
'instance_name': instance_name,
@ -194,12 +194,11 @@ class VolumeOps(object):
unsupported_specs = set(qos_specs.keys()).difference(
supported_qos_specs)
if unsupported_specs:
msg = (_LW('Got unsupported QoS specs: '
LOG.warning('Got unsupported QoS specs: '
'%(unsupported_specs)s. '
'Supported qos specs: %(supported_qos_specs)s') %
{'unsupported_specs': unsupported_specs,
'supported_qos_specs': supported_qos_specs})
LOG.warning(msg)
'Supported qos specs: %(supported_qos_specs)s',
{'unsupported_specs': unsupported_specs,
'supported_qos_specs': supported_qos_specs})
class BaseVolumeDriver(object):
@ -302,8 +301,8 @@ class BaseVolumeDriver(object):
return ctrller_path, slot
def set_disk_qos_specs(self, connection_info, disk_qos_specs):
LOG.info(_LI("The %(protocol)s Hyper-V volume driver "
"does not support QoS. Ignoring QoS specs."),
LOG.info("The %(protocol)s Hyper-V volume driver "
"does not support QoS. Ignoring QoS specs.",
dict(protocol=self._protocol))

View File

@ -41,9 +41,6 @@ from nova.console import type as console_type
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import objects
from nova.objects import fields as obj_fields
from nova import servicegroup
@ -90,7 +87,7 @@ def map_power_state(state):
try:
return _POWER_STATE_MAP[state]
except KeyError:
LOG.warning(_LW("Power state %s not found."), state)
LOG.warning("Power state %s not found.", state)
return power_state.NOSTATE
@ -217,8 +214,8 @@ class IronicDriver(virt_driver.ComputeDriver):
try:
properties[prop] = int(node.properties.get(prop, 0))
except (TypeError, ValueError):
LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer.'),
LOG.warning('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer.',
{'uuid': node.uuid, 'prop': prop})
properties[prop] = 0
@ -228,7 +225,7 @@ class IronicDriver(virt_driver.ComputeDriver):
except exception.InvalidArchitectureName:
cpu_arch = None
if not cpu_arch:
LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)
LOG.warning("cpu_arch not defined for node '%s'", node.uuid)
properties['cpu_arch'] = cpu_arch
properties['raw_cpu_arch'] = raw_cpu_arch
@ -253,9 +250,9 @@ class IronicDriver(virt_driver.ComputeDriver):
instance_info[prop] = int(node.instance_info.get(prop,
original))
except (TypeError, ValueError):
LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer but its value '
'is "%(value)s".'),
LOG.warning('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer but its value '
'is "%(value)s".',
{'uuid': node.uuid, 'prop': prop,
'value': node.instance_info.get(prop)})
instance_info[prop] = original
@ -298,8 +295,8 @@ class IronicDriver(virt_driver.ComputeDriver):
if len(parts) == 2 and parts[0] and parts[1]:
nodes_extra_specs[parts[0].strip()] = parts[1]
else:
LOG.warning(_LW("Ignoring malformed capability '%s'. "
"Format should be 'key:val'."), capability)
LOG.warning("Ignoring malformed capability '%s'. "
"Format should be 'key:val'.", capability)
vcpus_used = 0
memory_mb_used = 0
@ -389,9 +386,9 @@ class IronicDriver(virt_driver.ComputeDriver):
try:
self.ironicclient.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest as e:
LOG.warning(_LW("Failed to remove deploy parameters from node "
"%(node)s when unprovisioning the instance "
"%(instance)s: %(reason)s"),
LOG.warning("Failed to remove deploy parameters from node "
"%(node)s when unprovisioning the instance "
"%(instance)s: %(reason)s",
{'node': node.uuid, 'instance': instance.uuid,
'reason': six.text_type(e)})
@ -730,15 +727,15 @@ class IronicDriver(virt_driver.ComputeDriver):
properties = self._parse_node_properties(node)
memory_kib = properties['memory_mb'] * 1024
if memory_kib == 0:
LOG.warning(_LW("Warning, memory usage is 0 for "
"%(instance)s on baremetal node %(node)s."),
LOG.warning("Warning, memory usage is 0 for "
"%(instance)s on baremetal node %(node)s.",
{'instance': instance.uuid,
'node': instance.node})
num_cpu = properties['cpus']
if num_cpu == 0:
LOG.warning(_LW("Warning, number of cpus is 0 for "
"%(instance)s on baremetal node %(node)s."),
LOG.warning("Warning, number of cpus is 0 for "
"%(instance)s on baremetal node %(node)s.",
{'instance': instance.uuid,
'node': instance.node})
@ -912,8 +909,8 @@ class IronicDriver(virt_driver.ComputeDriver):
self._start_firewall(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error preparing deploy for instance "
"%(instance)s on baremetal node %(node)s."),
LOG.error("Error preparing deploy for instance "
"%(instance)s on baremetal node %(node)s.",
{'instance': instance.uuid,
'node': node_uuid})
self._cleanup_deploy(node, instance, network_info)
@ -931,14 +928,14 @@ class IronicDriver(virt_driver.ComputeDriver):
files=injected_files)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to build configdrive: %s") %
msg = ("Failed to build configdrive: %s" %
six.text_type(e))
LOG.error(msg, instance=instance)
self._cleanup_deploy(node, instance, network_info)
LOG.info(_LI("Config drive for instance %(instance)s on "
"baremetal node %(node)s created."),
{'instance': instance['uuid'], 'node': node_uuid})
LOG.info("Config drive for instance %(instance)s on "
"baremetal node %(node)s created.",
{'instance': instance['uuid'], 'node': node_uuid})
# trigger the node deploy
try:
@ -947,25 +944,24 @@ class IronicDriver(virt_driver.ComputeDriver):
configdrive=configdrive_value)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s"),
{'inst': instance.uuid,
'reason': six.text_type(e)})
LOG.error(msg)
LOG.error("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s",
{'inst': instance.uuid,
'reason': six.text_type(e)})
self._cleanup_deploy(node, instance, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully provisioned Ironic node %s'),
LOG.info('Successfully provisioned Ironic node %s',
node.uuid, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error deploying instance %(instance)s on "
"baremetal node %(node)s."),
{'instance': instance.uuid,
'node': node_uuid})
LOG.error("Error deploying instance %(instance)s on "
"baremetal node %(node)s.",
{'instance': instance.uuid,
'node': node_uuid})
def _unprovision(self, instance, node):
"""This method is called from destroy() to unprovision
@ -1039,7 +1035,7 @@ class IronicDriver(virt_driver.ComputeDriver):
try:
node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
LOG.warning("Destroy called on non-existing instance %s.",
instance.uuid)
# NOTE(deva): if nova.compute.ComputeManager._delete_instance()
# is called on a non-existing instance, the only way
@ -1056,7 +1052,7 @@ class IronicDriver(virt_driver.ComputeDriver):
self._remove_instance_info_from_node(node, instance)
self._cleanup_deploy(node, instance, network_info)
LOG.info(_LI('Successfully unprovisioned Ironic node %s'),
LOG.info('Successfully unprovisioned Ironic node %s',
node.uuid, instance=instance)
def reboot(self, context, instance, network_info, reboot_type,
@ -1088,8 +1084,8 @@ class IronicDriver(virt_driver.ComputeDriver):
'reboot', soft=True)
hard = False
except ironic.exc.BadRequest as exc:
LOG.info(_LI('Soft reboot is not supported by ironic hardware '
'driver. Falling back to hard reboot: %s'),
LOG.info('Soft reboot is not supported by ironic hardware '
'driver. Falling back to hard reboot: %s',
exc,
instance=instance)
@ -1099,8 +1095,7 @@ class IronicDriver(virt_driver.ComputeDriver):
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'reboot')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully rebooted(type %(type)s) Ironic node '
'%(node)s'),
LOG.info('Successfully rebooted(type %(type)s) Ironic node %(node)s',
{'type': ('HARD' if hard else 'SOFT'),
'node': node.uuid},
instance=instance)
@ -1131,25 +1126,24 @@ class IronicDriver(virt_driver.ComputeDriver):
timer.start(interval=CONF.ironic.api_retry_interval).wait()
node = self._validate_instance_and_node(instance)
if node.power_state == ironic_states.POWER_OFF:
LOG.info(_LI('Successfully soft powered off Ironic node '
'%s'),
LOG.info('Successfully soft powered off Ironic node %s',
node.uuid, instance=instance)
return
LOG.info(_LI("Failed to soft power off instance "
"%(instance)s on baremetal node %(node)s "
"within the required timeout %(timeout)d "
"seconds due to error: %(reason)s. "
"Attempting hard power off."),
LOG.info("Failed to soft power off instance "
"%(instance)s on baremetal node %(node)s "
"within the required timeout %(timeout)d "
"seconds due to error: %(reason)s. "
"Attempting hard power off.",
{'instance': instance.uuid,
'timeout': timeout,
'node': node.uuid,
'reason': node.last_error},
instance=instance)
except ironic.exc.ClientException as e:
LOG.info(_LI("Failed to soft power off instance "
"%(instance)s on baremetal node %(node)s "
"due to error: %(reason)s. "
"Attempting hard power off."),
LOG.info("Failed to soft power off instance "
"%(instance)s on baremetal node %(node)s "
"due to error: %(reason)s. "
"Attempting hard power off.",
{'instance': instance.uuid,
'node': node.uuid,
'reason': e},
@ -1159,7 +1153,7 @@ class IronicDriver(virt_driver.ComputeDriver):
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'power off')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully hard powered off Ironic node %s'),
LOG.info('Successfully hard powered off Ironic node %s',
node.uuid, instance=instance)
def power_on(self, context, instance, network_info,
@ -1184,7 +1178,7 @@ class IronicDriver(virt_driver.ComputeDriver):
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully powered on Ironic node %s'),
LOG.info('Successfully powered on Ironic node %s',
node.uuid, instance=instance)
def trigger_crash_dump(self, instance):
@ -1202,7 +1196,7 @@ class IronicDriver(virt_driver.ComputeDriver):
self.ironicclient.call("node.inject_nmi", node.uuid)
LOG.info(_LI('Successfully triggered crash dump into Ironic node %s'),
LOG.info('Successfully triggered crash dump into Ironic node %s',
node.uuid, instance=instance)
def refresh_security_group_rules(self, security_group_id):
@ -1379,7 +1373,7 @@ class IronicDriver(virt_driver.ComputeDriver):
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Instance was successfully rebuilt'), instance=instance)
LOG.info('Instance was successfully rebuilt', instance=instance)
def network_binding_host_id(self, context, instance):
"""Get host ID to associate with network ports.
@ -1434,10 +1428,9 @@ class IronicDriver(virt_driver.ComputeDriver):
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance
LOG.error(_LE('Failed to acquire console information for '
'instance %(inst)s: %(reason)s'),
{'inst': instance.uuid,
'reason': e})
LOG.error('Failed to acquire console information for '
'instance %(inst)s: %(reason)s',
{'inst': instance.uuid, 'reason': e})
raise exception.ConsoleNotAvailable()
def _wait_state(state):
@ -1459,8 +1452,8 @@ class IronicDriver(virt_driver.ComputeDriver):
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance
LOG.error(_LE('Failed to set console mode to "%(mode)s" '
'for instance %(inst)s: %(reason)s'),
LOG.error('Failed to set console mode to "%(mode)s" '
'for instance %(inst)s: %(reason)s',
{'mode': mode,
'inst': instance.uuid,
'reason': e})
@ -1474,8 +1467,8 @@ class IronicDriver(virt_driver.ComputeDriver):
timeout=CONF.ironic.serial_console_state_timeout,
jitter=0.5).wait()
except loopingcall.LoopingCallTimeOut:
LOG.error(_LE('Timeout while waiting for console mode to be '
'set to "%(mode)s" on node %(node)s'),
LOG.error('Timeout while waiting for console mode to be '
'set to "%(mode)s" on node %(node)s',
{'mode': mode,
'node': node_uuid})
raise exception.ConsoleNotAvailable()
@ -1528,8 +1521,8 @@ class IronicDriver(virt_driver.ComputeDriver):
console_info = result['console_info']
if console_info["type"] != "socat":
LOG.warning(_LW('Console type "%(type)s" (of ironic node '
'%(node)s) does not support Nova serial console'),
LOG.warning('Console type "%(type)s" (of ironic node '
'%(node)s) does not support Nova serial console',
{'type': console_info["type"],
'node': node.uuid},
instance=instance)
@ -1544,8 +1537,8 @@ class IronicDriver(virt_driver.ComputeDriver):
if not (scheme and hostname and port):
raise AssertionError()
except (ValueError, AssertionError):
LOG.error(_LE('Invalid Socat console URL "%(url)s" '
'(ironic node %(node)s)'),
LOG.error('Invalid Socat console URL "%(url)s" '
'(ironic node %(node)s)',
{'url': console_info["url"],
'node': node.uuid},
instance=instance)
@ -1555,8 +1548,8 @@ class IronicDriver(virt_driver.ComputeDriver):
return console_type.ConsoleSerial(host=hostname,
port=port)
else:
LOG.warning(_LW('Socat serial console only supports "tcp". '
'This URL is "%(url)s" (ironic node %(node)s).'),
LOG.warning('Socat serial console only supports "tcp". '
'This URL is "%(url)s" (ironic node %(node)s).',
{'url': console_info["url"],
'node': node.uuid},
instance=instance)

View File

@ -12,8 +12,6 @@
from oslo_log import log as logging
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
@ -32,7 +30,7 @@ def get_domain_info(libvirt, host, virt_dom):
return virt_dom.info()
except libvirt.libvirtError as e:
if not host.has_min_version((1, 2, 11)) and is_race(e):
LOG.warning(_LW('Race detected in libvirt.virDomain.info, '
'trying one more time'))
LOG.warning('Race detected in libvirt.virDomain.info, '
'trying one more time')
return virt_dom.info()
raise

File diff suppressed because it is too large.

View File

@ -24,8 +24,6 @@ from oslo_utils import excutils
from oslo_utils import importutils
import nova.conf
from nova.i18n import _LI
from nova.i18n import _LW
import nova.virt.firewall as base_firewall
from nova.virt import netutils
@ -55,8 +53,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
try:
libvirt = importutils.import_module('libvirt')
except ImportError:
LOG.warning(_LW("Libvirt module could not be loaded. "
"NWFilterFirewall will not work correctly."))
LOG.warning("Libvirt module could not be loaded. "
"NWFilterFirewall will not work correctly.")
self._host = host
self.static_filters_configured = False
@ -109,10 +107,10 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
def setup_basic_filtering(self, instance, network_info):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
LOG.info('Called setup_basic_filtering in nwfilter',
instance=instance)
LOG.info(_LI('Ensuring static filters'), instance=instance)
LOG.info('Ensuring static filters', instance=instance)
self._ensure_static_filters()
nodhcp_base_filter = self.get_base_filter_list(instance, False)
@ -281,9 +279,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# This happens when the instance filter is still in use
# (ie. when the instance has not terminated properly)
LOG.info(_LI('Failed to undefine network filter '
'%(name)s. Try %(cnt)d of '
'%(max_retry)d.'),
LOG.info('Failed to undefine network filter '
'%(name)s. Try %(cnt)d of %(max_retry)d.',
{'name': instance_filter_name,
'cnt': cnt + 1,
'max_retry': max_retry},
@ -349,8 +346,8 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
else:
LOG.info(_LI('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
LOG.info('Attempted to unfilter instance which is not filtered',
instance=instance)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""

View File

@ -40,8 +40,6 @@ import six
from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils
from nova.virt import hardware
from nova.virt.libvirt import compat
@ -127,7 +125,7 @@ class Guest(object):
guest = host.write_instance_config(xml)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a guest with XML: %s'),
LOG.error('Error defining a guest with XML: %s',
encodeutils.safe_decode(xml))
return guest
@ -141,8 +139,8 @@ class Guest(object):
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error launching a defined domain '
'with XML: %s'),
LOG.error('Error launching a defined domain '
'with XML: %s',
self._encoded_xml, errors='ignore')
def poweroff(self):
@ -177,7 +175,7 @@ class Guest(object):
LOG.debug('Failed to set time: agent not configured',
instance_uuid=self.uuid)
else:
LOG.warning(_LW('Failed to set time: %(reason)s'),
LOG.warning('Failed to set time: %(reason)s',
{'reason': e}, instance_uuid=self.uuid)
except Exception as ex:
# The highest priority is not to let this method crash and thus
@ -210,7 +208,7 @@ class Guest(object):
check_exit_code=[0, 1])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error enabling hairpin mode with XML: %s'),
LOG.error('Error enabling hairpin mode with XML: %s',
self._encoded_xml, errors='ignore')
def get_interfaces(self):

View File

@ -48,9 +48,6 @@ import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
@ -149,7 +146,7 @@ class Host(object):
try:
handler()
except Exception:
LOG.exception(_LE('Exception handling connection event'))
LOG.exception(_('Exception handling connection event'))
finally:
self._conn_event_handler_queue.task_done()
@ -378,8 +375,8 @@ class Host(object):
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
LOG.warning("URI %(uri)s does not support events: %(error)s",
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
@ -394,9 +391,9 @@ class Host(object):
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warning(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
LOG.warning("URI %(uri)s does not support connection"
" events: %(error)s",
{'uri': self._uri, 'error': e})
return wrapped_conn
@ -453,7 +450,7 @@ class Host(object):
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
@ -637,7 +634,7 @@ class Host(object):
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
LOG.info("Libvirt host capabilities %s", xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
@ -658,8 +655,8 @@ class Host(object):
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning(_LW("URI %(uri)s does not support full set"
" of host capabilities: %(error)s"),
LOG.warning("URI %(uri)s does not support full set"
" of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
@ -689,10 +686,9 @@ class Host(object):
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hostname,
'new': hostname})
LOG.error('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.',
{'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
@ -750,7 +746,7 @@ class Host(object):
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s'), xml)
LOG.error('Error defining a secret with XML: %s', xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
@ -800,8 +796,8 @@ class Host(object):
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
LOG.warning(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s"),
LOG.warning("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
# skip dom0

View File

@ -31,7 +31,6 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE, _LI, _LW
from nova import image
from nova import keymgr
from nova import utils
@ -248,8 +247,8 @@ class Image(object):
can_fallocate = not err
self.__class__.can_fallocate = can_fallocate
if not can_fallocate:
LOG.warning(_LW('Unable to preallocate image at path: '
'%(path)s'), {'path': self.path})
LOG.warning('Unable to preallocate image at path: %(path)s',
{'path': self.path})
return can_fallocate
def verify_base_size(self, base, size, base_size=0):
@ -274,11 +273,11 @@ class Image(object):
base_size = self.get_disk_size(base)
if size < base_size:
msg = _LE('%(base)s virtual size %(base_size)s '
'larger than flavor root disk size %(size)s')
LOG.error(msg, {'base': base,
'base_size': base_size,
'size': size})
LOG.error('%(base)s virtual size %(base_size)s '
'larger than flavor root disk size %(size)s',
{'base': base,
'base_size': base_size,
'size': size})
raise exception.FlavorDiskSmallerThanImage(
flavor_size=size, image_size=base_size)
@ -483,10 +482,9 @@ class Flat(Image):
data = images.qemu_img_info(self.path)
return data.file_format
except exception.InvalidDiskInfo as e:
LOG.info(_LI('Failed to get image info from path %(path)s; '
'error: %(error)s'),
{'path': self.path,
'error': e})
LOG.info('Failed to get image info from path %(path)s; '
'error: %(error)s',
{'path': self.path, 'error': e})
return 'raw'
def _supports_encryption(self):
@ -728,8 +726,8 @@ class Lvm(Image):
self.ephemeral_key_uuid).get_encoded()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to retrieve ephemeral encryption"
" key"))
LOG.error("Failed to retrieve ephemeral "
"encryption key")
else:
raise exception.InternalError(
_("Instance disk to be encrypted but no context provided"))

View File

@ -32,9 +32,6 @@ from oslo_utils import encodeutils
import six
import nova.conf
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import utils
from nova.virt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
@ -197,10 +194,9 @@ class ImageCacheManager(imagecache.ImageCacheManager):
inuse_images.append(backing_path)
if backing_path in self.unexplained_images:
LOG.warning(_LW('Instance %(instance)s is using a '
'backing file %(backing)s which '
'does not appear in the image '
'service'),
LOG.warning('Instance %(instance)s is using a '
'backing file %(backing)s which '
'does not appear in the image service',
{'instance': ent,
'backing': backing_file})
self.unexplained_images.remove(backing_path)
@ -261,7 +257,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if not exists or age < maxage:
return
LOG.info(_LI('Removing base or swap file: %s'), base_file)
LOG.info('Removing base or swap file: %s', base_file)
try:
os.remove(base_file)
@ -279,14 +275,13 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if os.path.exists(signature):
os.remove(signature)
except OSError as e:
LOG.error(_LE('Failed to remove %(base_file)s, '
'error was %(error)s'),
LOG.error('Failed to remove %(base_file)s, '
'error was %(error)s',
{'base_file': base_file,
'error': e})
if age < maxage:
LOG.info(_LI('Base or swap file too young to remove: %s'),
base_file)
LOG.info('Base or swap file too young to remove: %s', base_file)
else:
_inner_remove_old_enough_file()
if remove_lock:
@ -321,7 +316,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
def _mark_in_use(self, img_id, base_file):
"""Mark a single base image as in use."""
LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
LOG.info('image %(id)s at (%(base_file)s): checking',
{'id': img_id, 'base_file': base_file})
if base_file in self.unexplained_images:
@ -345,8 +340,8 @@ class ImageCacheManager(imagecache.ImageCacheManager):
error_images = self.used_swap_images - self.back_swap_images
for error_image in error_images:
LOG.warning(_LW('%s swap image was used by instance'
' but no back files existing!'), error_image)
LOG.warning('%s swap image was used by instance'
' but no back files existing!', error_image)
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
LOG.debug('Verify base images')
@ -368,16 +363,16 @@ class ImageCacheManager(imagecache.ImageCacheManager):
# Anything left is an unknown base image
for img in self.unexplained_images:
LOG.warning(_LW('Unknown base file: %s'), img)
LOG.warning('Unknown base file: %s', img)
self.removable_base_files.append(img)
# Dump these lists
if self.active_base_files:
LOG.info(_LI('Active base files: %s'),
LOG.info('Active base files: %s',
' '.join(self.active_base_files))
if self.removable_base_files:
LOG.info(_LI('Removable base files: %s'),
LOG.info('Removable base files: %s',
' '.join(self.removable_base_files))
if self.remove_unused_base_images:


@ -20,9 +20,6 @@ import signal
from oslo_log import log as logging
from nova.i18n import _LE
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
@ -75,9 +72,9 @@ class InstanceJobTracker(object):
os.kill(pid, signal.SIGKILL)
except OSError as exc:
if exc.errno != errno.ESRCH:
LOG.error(_LE('Failed to kill process %(pid)s '
'due to %(reason)s, while deleting the '
'instance.'), {'pid': pid, 'reason': exc},
LOG.error('Failed to kill process %(pid)s '
'due to %(reason)s, while deleting the '
'instance.', {'pid': pid, 'reason': exc},
instance=instance)
try:
@ -85,14 +82,12 @@ class InstanceJobTracker(object):
os.kill(pid, 0)
except OSError as exc:
if exc.errno != errno.ESRCH:
LOG.error(_LE('Unexpected error while checking process '
'%(pid)s.'), {'pid': pid},
instance=instance)
LOG.error('Unexpected error while checking process '
'%(pid)s.', {'pid': pid}, instance=instance)
else:
# The process is still around
LOG.warning(_LW("Failed to kill a long running process "
"%(pid)s related to the instance when "
"deleting it."), {'pid': pid},
instance=instance)
LOG.warning("Failed to kill a long running process "
"%(pid)s related to the instance when "
"deleting it.", {'pid': pid}, instance=instance)
self.remove_job(instance, pid)


@ -24,8 +24,6 @@ from oslo_log import log as logging
from nova.compute import power_state
import nova.conf
from nova.i18n import _LI
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
@ -240,7 +238,7 @@ def find_job_type(guest, instance):
instance=instance)
return libvirt.VIR_DOMAIN_JOB_COMPLETED
else:
LOG.info(_LI("Error %(ex)s, migration failed"),
LOG.info("Error %(ex)s, migration failed",
{"ex": ex}, instance=instance)
return libvirt.VIR_DOMAIN_JOB_FAILED
@ -271,15 +269,14 @@ def should_abort(instance, now,
if (progress_timeout != 0 and
(now - progress_time) > progress_timeout):
LOG.warning(_LW("Live migration stuck for %d sec"),
LOG.warning("Live migration stuck for %d sec",
(now - progress_time), instance=instance)
return True
if (completion_timeout != 0 and
elapsed > completion_timeout):
LOG.warning(
_LW("Live migration not completed after %d sec"),
completion_timeout, instance=instance)
LOG.warning("Live migration not completed after %d sec",
completion_timeout, instance=instance)
return True
return False
@ -359,8 +356,8 @@ def update_downtime(guest, instance,
instance=instance)
return olddowntime
LOG.info(_LI("Increasing downtime to %(downtime)d ms "
"after %(waittime)d sec elapsed time"),
LOG.info("Increasing downtime to %(downtime)d ms "
"after %(waittime)d sec elapsed time",
{"downtime": thisstep[1],
"waittime": thisstep[0]},
instance=instance)
@ -368,8 +365,7 @@ def update_downtime(guest, instance,
try:
guest.migrate_configure_max_downtime(thisstep[1])
except libvirt.libvirtError as e:
LOG.warning(_LW("Unable to increase max downtime to %(time)d"
"ms: %(e)s"),
LOG.warning("Unable to increase max downtime to %(time)d ms: %(e)s",
{"time": thisstep[1], "e": e}, instance=instance)
return thisstep[1]
@ -404,14 +400,13 @@ def trigger_postcopy_switch(guest, instance, migration):
try:
guest.migrate_start_postcopy()
except libvirt.libvirtError as e:
LOG.warning(_LW("Failed to switch to post-copy live "
"migration: %s"),
LOG.warning("Failed to switch to post-copy live migration: %s",
e, instance=instance)
else:
# NOTE(ltomas): Change the migration status to indicate that
# it is in post-copy active mode, i.e., the VM at
# destination is the active one
LOG.info(_LI("Switching to post-copy migration mode"),
LOG.info("Switching to post-copy migration mode",
instance=instance)
migration.status = 'running (post-copy)'
migration.save()
@ -443,8 +438,8 @@ def run_tasks(guest, instance, active_migrations, on_migration_failure,
task = tasks.popleft()
if task == 'force-complete':
if migration.status == 'running (post-copy)':
LOG.warning(_LW("Live-migration %s already switched "
"to post-copy mode."),
LOG.warning("Live-migration %s already switched "
"to post-copy mode.",
instance=instance)
elif is_post_copy_enabled:
trigger_postcopy_switch(guest, instance, migration)
@ -453,11 +448,11 @@ def run_tasks(guest, instance, active_migrations, on_migration_failure,
guest.pause()
on_migration_failure.append("unpause")
except Exception as e:
LOG.warning(_LW("Failed to pause instance during "
"live-migration %s"),
LOG.warning("Failed to pause instance during "
"live-migration %s",
e, instance=instance)
else:
LOG.warning(_LW("Unknown migration task '%(task)s'"),
LOG.warning("Unknown migration task '%(task)s'",
{"task": task}, instance=instance)
@ -488,11 +483,11 @@ def run_recover_tasks(host, guest, instance, on_migration_failure):
if state == power_state.PAUSED:
guest.resume()
except Exception as e:
LOG.warning(_LW("Failed to resume paused instance "
"before live-migration rollback %s"),
LOG.warning("Failed to resume paused instance "
"before live-migration rollback %s",
e, instance=instance)
else:
LOG.warning(_LW("Unknown migration task '%(task)s'"),
LOG.warning("Unknown migration task '%(task)s'",
{"task": task}, instance=instance)


@ -20,7 +20,6 @@ from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from nova.i18n import _LE
from nova.virt.libvirt import utils
@ -67,8 +66,8 @@ def create_volume(target, device, cipher, key_size, key):
utils.execute(*cmd, process_input=key, run_as_root=True)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not start encryption for disk %(device)s: "
"%(exception)s"), {'device': device, 'exception': e})
LOG.error("Could not start encryption for disk %(device)s: "
"%(exception)s", {'device': device, 'exception': e})
def delete_volume(target):
@ -87,10 +86,10 @@ def delete_volume(target):
LOG.debug("Ignoring exit code 4, volume already destroyed")
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not disconnect encrypted volume "
"%(volume)s. If dm-crypt device is still active "
"it will have to be destroyed manually for "
"cleanup to succeed."), {'volume': target})
LOG.error("Could not disconnect encrypted volume "
"%(volume)s. If dm-crypt device is still active "
"it will have to be destroyed manually for "
"cleanup to succeed.", {'volume': target})
def list_volumes():


@ -27,7 +27,6 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.virt.libvirt import utils
CONF = nova.conf.CONF
@ -62,11 +61,11 @@ def create_volume(vg, lv, size, sparse=False):
preallocated_space = 64 * units.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_LW('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)d bytes,'
' but free space on volume group is'
' only %(free_space)db.'),
LOG.warning('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)d bytes,'
' but free space on volume group is'
' only %(free_space)db.',
{'vg': vg,
'free_space': free_space,
'size': size,
@ -210,8 +209,7 @@ def clear_volume(path):
try:
volume_size = get_volume_size(path)
except exception.VolumeBDMPathNotFound:
LOG.warning(_LW('ignoring missing logical volume %(path)s'),
{'path': path})
LOG.warning('ignoring missing logical volume %(path)s', {'path': path})
return
if volume_clear_size != 0 and volume_clear_size < volume_size:


@ -32,8 +32,6 @@ from oslo_utils import units
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
@ -78,7 +76,7 @@ class RBDVolumeProxy(object):
driver._disconnect_from_rados(client, ioctx)
except rbd.Error:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("error opening rbd image %s"), name)
LOG.exception(_("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
self.driver = driver
@ -306,13 +304,13 @@ class RBDDriver(object):
try:
RbdProxy().remove(client.ioctx, name)
except rbd.ImageNotFound:
LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '
'found, failed to remove'),
LOG.warning('image %(volume)s in pool %(pool)s can not be '
'found, failed to remove',
{'volume': name, 'pool': self.pool})
except rbd.ImageHasSnapshots:
LOG.error(_LE('image %(volume)s in pool %(pool)s has '
'snapshots, failed to remove'),
{'volume': name, 'pool': self.pool})
LOG.error('image %(volume)s in pool %(pool)s has '
'snapshots, failed to remove',
{'volume': name, 'pool': self.pool})
def import_image(self, base, name):
"""Import RBD volume from image file.
@ -342,9 +340,8 @@ class RBDDriver(object):
self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '
'failed'),
{'volume': volume, 'pool': self.pool})
LOG.warning('rbd remove %(volume)s in pool %(pool)s failed',
{'volume': volume, 'pool': self.pool})
retryctx['retries'] -= 1
if retryctx['retries'] <= 0:
raise loopingcall.LoopingCallDone()
@ -406,17 +403,16 @@ class RBDDriver(object):
if force:
vol.unprotect_snap(name)
elif not ignore_errors:
LOG.warning(_LW('snapshot(%(name)s) on rbd '
'image(%(img)s) is protected, '
'skipping'),
LOG.warning('snapshot(%(name)s) on rbd '
'image(%(img)s) is protected, skipping',
{'name': name, 'img': volume})
return
LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
{'name': name, 'img': volume})
vol.remove_snap(name)
elif not ignore_errors:
LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
'image(%(img)s)'),
LOG.warning('no snapshot(%(name)s) found on rbd '
'image(%(img)s)',
{'name': name, 'img': volume})
def rollback_to_snap(self, volume, name):


@ -27,8 +27,6 @@ from oslo_log import log as logging
import nova.conf
from nova.i18n import _
from nova.i18n import _LI
from nova.i18n import _LW
from nova.objects import fields as obj_fields
from nova import utils
from nova.virt.disk import api as disk
@ -167,7 +165,7 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
else:
return "tap"
else:
LOG.info(_LI("tap-ctl check: %s"), out)
LOG.info("tap-ctl check: %s", out)
except OSError as exc:
if exc.errno == errno.ENOENT:
LOG.debug("tap-ctl tool is not installed")
@ -279,8 +277,8 @@ def update_mtime(path):
# the same base image and using shared storage, so log the exception
# but don't fail. Ideally we'd know if we were on shared storage and
# would re-raise the error if we are not on shared storage.
LOG.warning(_LW("Failed to update mtime on path %(path)s. "
"Error: %(error)s"),
LOG.warning("Failed to update mtime on path %(path)s. "
"Error: %(error)s",
{'path': path, "error": exc})


@ -29,7 +29,6 @@ from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova.network import os_vif_util
@ -634,10 +633,8 @@ class LibvirtGenericVIFDriver(object):
fabric, network_model.VIF_TYPE_IB_HOSTDEV,
pci_slot, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while plugging ib hostdev vif"),
instance=instance
)
LOG.exception(_("Failed while plugging ib hostdev vif"),
instance=instance)
def plug_802qbg(self, instance, vif):
pass
@ -679,7 +676,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
@ -700,7 +697,7 @@ class LibvirtGenericVIFDriver(object):
'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
@ -754,7 +751,7 @@ class LibvirtGenericVIFDriver(object):
linux_net.create_tap_dev(dev, multiqueue=multiqueue)
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
LOG.exception(_("Failed while plugging vif"), instance=instance)
def _plug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
@ -817,16 +814,14 @@ class LibvirtGenericVIFDriver(object):
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
@ -844,8 +839,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
@ -864,7 +858,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
run_as_root=True)
except Exception:
LOG.exception(_LE("Failed while unplugging ib hostdev vif"))
LOG.exception(_("Failed while unplugging ib hostdev vif"))
def unplug_802qbg(self, instance, vif):
pass
@ -900,8 +894,7 @@ class LibvirtGenericVIFDriver(object):
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
@ -909,8 +902,7 @@ class LibvirtGenericVIFDriver(object):
try:
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
@ -926,8 +918,7 @@ class LibvirtGenericVIFDriver(object):
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_vhostuser(self, instance, vif):
pass
@ -943,8 +934,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while unplugging vif"), instance=instance)
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def _unplug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)


@ -16,7 +16,6 @@ from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
@ -73,7 +72,7 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
try:
self.connector.disconnect_volume(connection_info['data'], None)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning(_LW('Ignoring VolumeDeviceNotFound: %s'), exc)
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)


@ -23,7 +23,7 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _LE, _LW
from nova.i18n import _
from nova import utils
CONF = nova.conf.CONF
@ -111,8 +111,7 @@ class _HostMountStateManager(object):
"""
with self.cond:
if self.state is not None:
LOG.warning(_LW("host_up called, but we think host is "
"already up"))
LOG.warning("host_up called, but we think host is already up")
self._host_down()
# Wait until all operations using a previous state generation are
@ -139,8 +138,7 @@ class _HostMountStateManager(object):
"""
with self.cond:
if self.state is None:
LOG.warning(_LW("host_down called, but we don't think host "
"is up"))
LOG.warning("host_down called, but we don't think host is up")
return
self._host_down()
@ -313,10 +311,10 @@ class _HostMountState(object):
# We're not going to raise the exception because we're
# in the desired state anyway. However, this is still
# unusual so we'll log it.
LOG.exception(_LE('Error mounting %(fstype)s export '
'%(export)s on %(mountpoint)s. '
'Continuing because mountpount is '
'mounted despite this.'),
LOG.exception(_('Error mounting %(fstype)s export '
'%(export)s on %(mountpoint)s. '
'Continuing because mountpount is '
'mounted despite this.'),
{'fstype': fstype, 'export': export,
'mountpoint': mountpoint})
@ -353,10 +351,9 @@ class _HostMountState(object):
try:
mount.remove_attachment(vol_name, instance.uuid)
except KeyError:
LOG.warning(_LW("Request to remove attachment "
"(%(vol_name)s, %(instance)s) from "
"%(mountpoint)s, but we don't think it's in "
"use."),
LOG.warning("Request to remove attachment "
"(%(vol_name)s, %(instance)s) from "
"%(mountpoint)s, but we don't think it's in use.",
{'vol_name': vol_name, 'instance': instance.uuid,
'mountpoint': mountpoint})
@ -384,15 +381,15 @@ class _HostMountState(object):
utils.execute('umount', mountpoint, run_as_root=True,
attempts=3, delay_on_retry=True)
except processutils.ProcessExecutionError as ex:
LOG.error(_LE("Couldn't unmount %(mountpoint)s: %(reason)s"),
LOG.error("Couldn't unmount %(mountpoint)s: %(reason)s",
{'mountpoint': mountpoint, 'reason': six.text_type(ex)})
if not os.path.ismount(mountpoint):
try:
utils.execute('rmdir', mountpoint)
except processutils.ProcessExecutionError as ex:
LOG.error(_LE("Couldn't remove directory %(mountpoint)s: "
"%(reason)s"),
LOG.error("Couldn't remove directory %(mountpoint)s: "
"%(reason)s",
{'mountpoint': mountpoint,
'reason': six.text_type(ex)})
return False


@ -14,7 +14,7 @@ from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _, _LW
from nova.i18n import _
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
@ -81,10 +81,10 @@ class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# NOTE(mriedem): We'll have to be extra careful about this in case
# the reason we got here is due to an old volume connection created
# before we started preferring the Cinder settings in Ocata.
LOG.warning(_LW('Falling back to Nova configuration values for '
'RBD authentication. Cinder should be configured '
'for auth with Ceph volumes. This fallback will '
'be dropped in the Nova 16.0.0 Pike release.'))
LOG.warning('Falling back to Nova configuration values for '
'RBD authentication. Cinder should be configured '
'for auth with Ceph volumes. This fallback will '
'be dropped in the Nova 16.0.0 Pike release.')
# use the nova config values
conf.auth_username = CONF.libvirt.rbd_user
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid


@ -24,8 +24,6 @@ import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import fs
@ -53,7 +51,7 @@ def mount_volume(volume, mnt_base, configfile=None):
mnt_base)
# Run mount command but do not fail on already mounted exit code
utils.execute(*command, check_exit_code=[0, 4])
LOG.info(_LI('Mounted volume: %s'), volume)
LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
@ -62,10 +60,9 @@ def umount_volume(mnt_base):
utils.execute('umount.quobyte', mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.error(_LE("The Quobyte volume at %s is still in use."),
mnt_base)
LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
LOG.exception(_LE("Couldn't unmount the Quobyte Volume at %s"),
LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
@ -81,8 +78,8 @@ def validate_volume(mnt_base):
raise nova_exception.InternalError(msg)
if not os.access(mnt_base, os.W_OK | os.X_OK):
msg = (_LE("Volume is not writable. Please broaden the file"
" permissions. Mount: %s") % mnt_base)
msg = _("Volume is not writable. Please broaden the file"
" permissions. Mount: %s") % mnt_base
raise nova_exception.InternalError(msg)
@ -121,8 +118,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
except OSError as exc:
if exc.errno == errno.ENOTCONN:
mounted = False
LOG.info(_LI('Fixing previous mount %s which was not'
' unmounted correctly.'), mount_path)
LOG.info('Fixing previous mount %s which was not'
' unmounted correctly.', mount_path)
umount_volume(mount_path)
if not mounted:
@ -143,7 +140,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
if libvirt_utils.is_mounted(mount_path, 'quobyte@' + quobyte_volume):
umount_volume(mount_path)
else:
LOG.info(_LI("Trying to disconnected unmounted volume at %s"),
LOG.info("Trying to disconnected unmounted volume at %s",
mount_path)
def _normalize_export(self, export):


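One detail visible in the hunks above: only messages that are merely logged lose their markers. Strings raised to the caller (for example the InternalError message in validate_volume) keep the _() translation marker, and several LOG.exception calls are switched from _LE to _ rather than to a bare string. The sketch below illustrates that split; it is not Nova code, the _() stub exists only so the snippet runs on its own, and the names are placeholders.

import logging

LOG = logging.getLogger(__name__)

def _(message):
    # Stand-in for nova.i18n._ so this sketch is self-contained; the real
    # helper marks the string for translation.
    return message

class InternalError(Exception):
    pass

def validate_mount(mnt_base, writable):
    if not writable:
        # User-facing text raised in an exception keeps the translation marker.
        raise InternalError(_("Volume is not writable. Mount: %s") % mnt_base)
    # Operator-facing, log-only text is plain English with lazy interpolation.
    LOG.info("Validated mount at %s", mnt_base)
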
@ -24,7 +24,7 @@ from oslo_utils import importutils
import six
import nova.conf
from nova.i18n import _LE, _LW
from nova.i18n import _
from nova import utils
LOG = logging.getLogger(__name__)
@ -52,7 +52,7 @@ def mount_share(mount_path, export_path,
utils.execute(*mount_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.warning(_LW("%s is already mounted"), export_path)
LOG.warning("%s is already mounted", export_path)
else:
raise
@ -70,8 +70,7 @@ def unmount_share(mount_path, export_path):
if 'target is busy' in six.text_type(exc):
LOG.debug("The share %s is still in use.", export_path)
else:
LOG.exception(_LE("Couldn't unmount the share %s"),
export_path)
LOG.exception(_("Couldn't unmount the share %s"), export_path)
class RemoteFilesystem(object):


@ -21,8 +21,6 @@ from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _LE
from nova.i18n import _LW
from nova import profiler
from nova.virt.libvirt import config as vconfig
import nova.virt.libvirt.driver
@ -76,8 +74,8 @@ class LibvirtBaseVolumeDriver(object):
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warning(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
LOG.warning('Unknown content in connection_info/'
'qos_specs: %s', specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
@ -85,8 +83,8 @@ class LibvirtBaseVolumeDriver(object):
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
LOG.error('Unknown content in '
'connection_info/access_mode: %s',
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)


@ -16,7 +16,6 @@ from oslo_log import log as logging
from oslo_utils import importutils
from nova import exception
from nova.i18n import _LW, _LI
libosinfo = None
LOG = logging.getLogger(__name__)
@ -40,7 +39,7 @@ class _OsInfoDatabase(object):
libosinfo = importutils.import_module(
'gi.repository.Libosinfo')
except ImportError as exp:
LOG.info(_LI("Cannot load Libosinfo: (%s)"), exp)
LOG.info("Cannot load Libosinfo: (%s)", exp)
else:
self.loader = libosinfo.Loader()
self.loader.process_default_path()
@ -94,8 +93,7 @@ class OsInfo(object):
try:
return _OsInfoDatabase.get_instance().get_os(os_name)
except exception.NovaException as e:
LOG.warning(_LW("Cannot find OS information "
"- Reason: (%s)"), e)
LOG.warning("Cannot find OS information - Reason: (%s)", e)
@property
def network_model(self):

View File

@ -20,7 +20,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.i18n import _LW
from nova import utils
LOG = logging.getLogger(__name__)
@ -62,7 +61,7 @@ def register_storage_use(storage_path, hostname):
try:
d = jsonutils.loads(f.read())
except ValueError:
LOG.warning(_LW("Cannot decode JSON from %(id_path)s"),
LOG.warning("Cannot decode JSON from %(id_path)s",
{"id_path": id_path})
d[hostname] = time.time()
@ -90,7 +89,7 @@ def get_storage_users(storage_path):
try:
d = jsonutils.loads(f.read())
except ValueError:
LOG.warning(_LW("Cannot decode JSON from %(id_path)s"),
LOG.warning("Cannot decode JSON from %(id_path)s",
{"id_path": id_path})
recent_users = []


@ -34,7 +34,7 @@ from nova.compute import power_state
from nova.compute import task_states
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova.i18n import _
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import error_util
@ -131,7 +131,7 @@ class VMwareVCDriver(driver.ComputeDriver):
next_min_ver = v_utils.convert_version_to_int(
constants.NEXT_MIN_VC_VERSION)
vc_version = vim_util.get_vc_version(self._session)
LOG.info(_LI("VMware vCenter version: %s"), vc_version)
LOG.info("VMware vCenter version: %s", vc_version)
if v_utils.convert_version_to_int(vc_version) < min_version:
raise exception.NovaException(
_('Detected vCenter version %(version)s. Nova requires VMware '
@ -139,10 +139,10 @@ class VMwareVCDriver(driver.ComputeDriver):
'version': vc_version,
'min_version': constants.MIN_VC_VERSION})
elif v_utils.convert_version_to_int(vc_version) < next_min_ver:
LOG.warning(_LW('Running Nova with a VMware vCenter version less '
'than %(version)s is deprecated. The required '
'minimum version of vCenter will be raised to '
'%(version)s in the 16.0.0 release.'),
LOG.warning('Running Nova with a VMware vCenter version less '
'than %(version)s is deprecated. The required '
'minimum version of vCenter will be raised to '
'%(version)s in the 16.0.0 release.',
{'version': constants.NEXT_MIN_VC_VERSION})
@property
@ -166,8 +166,7 @@ class VMwareVCDriver(driver.ComputeDriver):
CONF.vmware.pbm_default_policy):
raise error_util.PbmDefaultPolicyDoesNotExist()
if CONF.vmware.datastore_regex:
LOG.warning(_LW(
"datastore_regex is ignored when PBM is enabled"))
LOG.warning("datastore_regex is ignored when PBM is enabled")
self._datastore_regex = None
def init_host(self, host):
@ -365,13 +364,13 @@ class VMwareVCDriver(driver.ComputeDriver):
self.detach_volume(connection_info, instance,
disk.get('device_name'))
except exception.DiskNotFound:
LOG.warning(_LW('The volume %s does not exist!'),
LOG.warning('The volume %s does not exist!',
disk.get('device_name'),
instance=instance)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to detach %(device_name)s. "
"Exception: %(exc)s"),
LOG.error("Failed to detach %(device_name)s. "
"Exception: %(exc)s",
{'device_name': disk.get('device_name'),
'exc': e},
instance=instance)
@ -396,8 +395,8 @@ class VMwareVCDriver(driver.ComputeDriver):
try:
self._detach_instance_volumes(instance, block_device_info)
except vexc.ManagedObjectNotFoundException:
LOG.warning(_LW('Instance does not exists. Proceeding to '
'delete instance properties on datastore'),
LOG.warning('Instance does not exists. Proceeding to '
'delete instance properties on datastore',
instance=instance)
self._vmops.destroy(instance, destroy_disks)


@ -24,7 +24,7 @@ from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.i18n import _, _LE, _LI
from nova.i18n import _
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@ -277,7 +277,7 @@ def disk_move(session, dc_ref, src_file, dst_file):
destDatacenter=dc_ref,
force=False)
session._wait_for_task(move_task)
LOG.info(_LI("Moved virtual disk from %(src)s to %(dst)s."),
LOG.info("Moved virtual disk from %(src)s to %(dst)s.",
{'src': src_file, 'dst': dst_file})
@ -295,7 +295,7 @@ def disk_copy(session, dc_ref, src_file, dst_file):
destDatacenter=dc_ref,
force=False)
session._wait_for_task(copy_disk_task)
LOG.info(_LI("Copied virtual disk from %(src)s to %(dst)s."),
LOG.info("Copied virtual disk from %(src)s to %(dst)s.",
{'src': src_file, 'dst': dst_file})
@ -309,7 +309,7 @@ def disk_delete(session, dc_ref, file_path):
name=str(file_path),
datacenter=dc_ref)
session._wait_for_task(delete_disk_task)
LOG.info(_LI("Deleted virtual disk %s."), file_path)
LOG.info("Deleted virtual disk %s.", file_path)
def file_move(session, dc_ref, src_file, dst_file):
@ -451,8 +451,7 @@ def _filter_datastores_matching_storage_policy(session, data_stores,
if oc.obj in matching_ds]
data_stores.objects = object_contents
return data_stores
LOG.error(_LE("Unable to retrieve storage policy with name %s"),
storage_policy)
LOG.error("Unable to retrieve storage policy with name %s", storage_policy)
def _update_datacenter_cache_from_objects(session, dcs):


@ -25,7 +25,6 @@ from oslo_vmware import exceptions as vexc
import nova.conf
from nova import context
from nova import exception
from nova.i18n import _LW
from nova import objects
from nova.objects import fields as obj_fields
from nova.virt.vmwareapi import ds_util
@ -79,8 +78,8 @@ class VCState(object):
about_info = self._session._call_method(vim_util, "get_about_info")
except (vexc.VimConnectionException, vexc.VimAttributeException) as ex:
# VimAttributeException is thrown when vpxd service is down
LOG.warning(_LW("Failed to connect with %(node)s. "
"Error: %(error)s"),
LOG.warning("Failed to connect with %(node)s. "
"Error: %(error)s",
{'node': self._host_name, 'error': ex})
self._set_host_enabled(False)
return data


@ -42,7 +42,6 @@ from oslo_utils import timeutils
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util as vutil
from nova.i18n import _LI, _LW
from nova.virt import imagecache
from nova.virt.vmwareapi import ds_util
@ -69,7 +68,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
vexc.FileLockedException) as e:
# There may be more than one process or thread that tries
# to delete the file.
LOG.warning(_LW("Unable to delete %(file)s. Exception: %(ex)s"),
LOG.warning("Unable to delete %(file)s. Exception: %(ex)s",
{'file': ds_path, 'ex': e})
except vexc.FileNotFoundException:
LOG.debug("File not found: %s", ds_path)
@ -157,13 +156,12 @@ class ImageCacheManager(imagecache.ImageCacheManager):
ds_util.mkdir(self._session, ts_path, dc_info.ref)
except vexc.FileAlreadyExistsException:
LOG.debug("Timestamp already exists.")
LOG.info(_LI("Image %s is no longer used by this node. "
"Pending deletion!"), image)
LOG.info("Image %s is no longer used by this node. "
"Pending deletion!", image)
else:
dt = self._get_datetime_from_filename(str(ts))
if timeutils.is_older_than(dt, age_seconds):
LOG.info(_LI("Image %s is no longer used. "
"Deleting!"), path)
LOG.info("Image %s is no longer used. Deleting!", path)
# Image has aged - delete the image ID folder
self._folder_delete(path, dc_info.ref)


@ -31,7 +31,7 @@ from oslo_vmware import rw_handles
from nova import exception
from nova.i18n import _, _LI
from nova.i18n import _
from nova import image
from nova.objects import fields
from nova.virt.vmwareapi import constants
@ -358,11 +358,11 @@ def fetch_image_stream_optimized(context, instance, session, vm_name,
imported_vm_ref = write_handle.get_imported_vm()
LOG.info(_LI("Downloaded image file data %(image_ref)s"),
LOG.info("Downloaded image file data %(image_ref)s",
{'image_ref': instance.image_ref}, instance=instance)
vmdk = vm_util.get_vmdk_info(session, imported_vm_ref, vm_name)
session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"), instance=instance)
LOG.info("The imported VM was unregistered", instance=instance)
return vmdk.capacity_in_bytes
@ -420,15 +420,15 @@ def fetch_image_ova(context, instance, session, vm_name, ds_name,
vm_import_spec,
file_size)
image_transfer(extracted, write_handle)
LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
LOG.info("Downloaded OVA image file %(image_ref)s",
{'image_ref': instance.image_ref}, instance=instance)
imported_vm_ref = write_handle.get_imported_vm()
vmdk = vm_util.get_vmdk_info(session,
imported_vm_ref,
vm_name)
session._call_method(session.vim, "UnregisterVM",
imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"),
LOG.info("The imported VM was unregistered",
instance=instance)
return vmdk.capacity_in_bytes
raise exception.ImageUnacceptable(


@ -21,7 +21,7 @@ from oslo_vmware import vim_util
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LW
from nova.i18n import _
from nova.network import model
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import network_util
@ -96,8 +96,8 @@ def _check_ovs_supported_version(session):
vc_version = versionutils.convert_version_to_int(
vim_util.get_vc_version(session))
if vc_version < min_version:
LOG.warning(_LW('VMware vCenter version less than %(version)s '
'does not support the \'ovs\' port type.'),
LOG.warning('VMware vCenter version less than %(version)s '
'does not support the \'ovs\' port type.',
{'version': constants.MIN_VC_OVS_VERSION})
@ -118,9 +118,9 @@ def _get_neutron_network(session, cluster, vif):
if not net_id:
# Make use of the original one, in the event that the
# plugin does not pass the aforementioned id
LOG.info(_LI('NSX Logical switch ID is not present. '
'Using network ID to attach to the '
'opaque network.'))
LOG.info('NSX Logical switch ID is not present. '
'Using network ID to attach to the '
'opaque network.')
net_id = vif['network']['id']
use_external_id = True
network_type = 'nsx.LogicalSwitch'


@ -33,7 +33,7 @@ from oslo_vmware import vim_util as vutil
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova.network import model as network_model
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
@ -1280,7 +1280,7 @@ def get_all_cluster_mors(session):
return results.objects
except Exception as excep:
LOG.warning(_LW("Failed to get cluster references %s"), excep)
LOG.warning("Failed to get cluster references %s", excep)
def get_cluster_ref_by_name(session, cluster_name):
@ -1327,10 +1327,10 @@ def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
# Consequently, a value which we don't recognise may in fact be valid.
with excutils.save_and_reraise_exception():
if config_spec.guestId not in constants.VALID_OS_TYPES:
LOG.warning(_LW('vmware_ostype from image is not recognised: '
'\'%(ostype)s\'. An invalid os type may be '
'one cause of this instance creation failure'),
{'ostype': config_spec.guestId})
LOG.warning('vmware_ostype from image is not recognised: '
'\'%(ostype)s\'. An invalid os type may be '
'one cause of this instance creation failure',
{'ostype': config_spec.guestId})
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
@ -1344,9 +1344,9 @@ def destroy_vm(session, instance, vm_ref=None):
destroy_task = session._call_method(session.vim, "Destroy_Task",
vm_ref)
session._wait_for_task(destroy_task)
LOG.info(_LI("Destroyed the VM"), instance=instance)
LOG.info("Destroyed the VM", instance=instance)
except Exception:
LOG.exception(_LE('Destroy VM failed'), instance=instance)
LOG.exception(_('Destroy VM failed'), instance=instance)
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
@ -1606,7 +1606,7 @@ def create_folder(session, parent_folder_ref, name):
try:
folder = session._call_method(session.vim, "CreateFolder",
parent_folder_ref, name=name)
LOG.info(_LI("Created folder: %(name)s in parent %(parent)s."),
LOG.info("Created folder: %(name)s in parent %(parent)s.",
{'name': name, 'parent': parent_folder_ref.value})
except vexc.DuplicateName as e:
LOG.debug("Folder already exists: %(name)s. Parent ref: %(parent)s.",


@ -43,7 +43,7 @@ import nova.conf
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova import network
from nova import objects
from nova import utils
@ -176,7 +176,7 @@ class VMwareVMOps(object):
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Extending virtual disk failed with error: %s'),
LOG.error('Extending virtual disk failed with error: %s',
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
@ -392,7 +392,7 @@ class VMwareVMOps(object):
host, cookies = self._get_esx_host_and_cookies(vi.datastore,
dc_path, image_ds_loc.rel_path)
except Exception as e:
LOG.warning(_LW("Get esx cookies failed: %s"), e,
LOG.warning("Get esx cookies failed: %s", e,
instance=vi.instance)
dc_path = vutil.get_inventory_path(session.vim, vi.dc_info.ref)
@ -507,8 +507,8 @@ class VMwareVMOps(object):
# due to action external to the process.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
"can lead to unexpected results."),
LOG.warning("Destination %s already exists! Concurrent moves "
"can lead to unexpected results.",
dst_folder_ds_path)
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
@ -833,7 +833,7 @@ class VMwareVMOps(object):
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_LI('Using config drive for instance'), instance=instance)
LOG.info('Using config drive for instance', instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
@ -861,7 +861,7 @@ class VMwareVMOps(object):
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with error: %s'),
LOG.error('Creating config drive failed with error: %s',
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
@ -941,8 +941,7 @@ class VMwareVMOps(object):
name=vm_name,
spec=clone_spec)
self._session._wait_for_task(vm_clone_task)
LOG.info(_LI("Created linked-clone VM from snapshot"),
instance=instance)
LOG.info("Created linked-clone VM from snapshot", instance=instance)
task_info = self._session._call_method(vutil,
"get_object_property",
vm_clone_task,
@ -1077,9 +1076,9 @@ class VMwareVMOps(object):
"UnregisterVM", vm_ref)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, got "
"this exception while un-registering the VM: "
"%s"), excep, instance=instance)
LOG.warning("In vmwareapi:vmops:_destroy_instance, got "
"this exception while un-registering the VM: %s",
excep, instance=instance)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and vm_ds_path:
@ -1100,16 +1099,15 @@ class VMwareVMOps(object):
{'datastore_name': vm_ds_path.datastore},
instance=instance)
except Exception:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents "
"from the disk"),
LOG.warning("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents "
"from the disk",
exc_info=True, instance=instance)
except exception.InstanceNotFound:
LOG.warning(_LW('Instance does not exist on backend'),
LOG.warning('Instance does not exist on backend',
instance=instance)
except Exception:
LOG.exception(_LE('Destroy instance failed'),
instance=instance)
LOG.exception(_('Destroy instance failed'), instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance.uuid)
@ -1238,7 +1236,7 @@ class VMwareVMOps(object):
rescue_device = self._get_rescue_device(instance, vm_ref)
except exception.NotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to access the rescue disk'),
LOG.error('Unable to access the rescue disk',
instance=instance)
vm_util.power_off_instance(self._session, instance, vm_ref)
self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device,
@ -1488,11 +1486,11 @@ class VMwareVMOps(object):
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_LI("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds"), instances_info)
LOG.info("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds", instances_info)
for instance in instances:
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
LOG.info("Automatically hard rebooting", instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
@ -1763,8 +1761,7 @@ class VMwareVMOps(object):
vm_util.reconfigure_vm(self._session, vm_ref,
attach_config_spec)
except Exception as e:
LOG.error(_LE('Attaching network adapter failed. Exception: '
'%s'),
LOG.error('Attaching network adapter failed. Exception: %s',
e, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
@ -1812,8 +1809,7 @@ class VMwareVMOps(object):
vm_util.reconfigure_vm(self._session, vm_ref,
detach_config_spec)
except Exception as e:
LOG.error(_LE('Detaching network adapter failed. Exception: '
'%s'),
LOG.error('Detaching network adapter failed. Exception: %s',
e, instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
@ -1883,14 +1879,11 @@ class VMwareVMOps(object):
str(vi.cache_image_path),
str(sized_disk_ds_loc))
except Exception as e:
LOG.warning(_LW("Root disk file creation "
"failed - %s"),
LOG.warning("Root disk file creation failed - %s",
e, instance=vi.instance)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy cached '
'image %(source)s to '
'%(dest)s for resize: '
'%(error)s'),
LOG.error('Failed to copy cached image %(source)s to '
'%(dest)s for resize: %(error)s',
{'source': vi.cache_image_path,
'dest': sized_disk_ds_loc,
'error': e},


@ -23,7 +23,7 @@ from oslo_vmware import vim_util as vutil
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LW
from nova.i18n import _
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vm_util
@ -461,8 +461,8 @@ class VMwareVolumeOps(object):
# The volume has been moved from its original location.
# Need to consolidate the VMDK files.
LOG.info(_LI("The volume's backing has been relocated to %s. Need to "
"consolidate backing disk file."), current_device_path)
LOG.info("The volume's backing has been relocated to %s. Need to "
"consolidate backing disk file.", current_device_path)
# Pick the host and resource pool on which the instance resides.
# Move the volume to the datastore where the new VMDK file is present.
@ -479,8 +479,8 @@ class VMwareVolumeOps(object):
except oslo_vmw_exceptions.FileNotFoundException:
# Volume's vmdk was moved; remove the device so that we can
# relocate the volume.
LOG.warning(_LW("Virtual disk: %s of volume's backing not found."),
original_device_path, exc_info=True)
LOG.warning("Virtual disk: %s of volume's backing not found.",
original_device_path, exc_info=True)
LOG.debug("Removing disk device of volume's backing and "
"reattempting relocate.")
self.detach_disk_from_vm(volume_ref, instance, original_device)


@ -34,7 +34,7 @@ import nova.conf
from nova import context
from nova import crypto
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova import objects
from nova import utils
@ -75,8 +75,8 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
except XenAPI.Failure as e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_LE('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'),
LOG.error('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r',
{'method': method, 'args': args}, instance=instance)
raise exception.AgentTimeout(method=method.__name__)
elif 'REBOOT:' in err_msg:
@ -87,13 +87,13 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
return _call_agent(session, instance, vm_ref, method,
addl_args, timeout, success_codes)
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_LE('NOT IMPLEMENTED: The call to %(method)s is not '
'supported by the agent. args=%(args)r'),
LOG.error('NOT IMPLEMENTED: The call to %(method)s is not '
'supported by the agent. args=%(args)r',
{'method': method, 'args': args}, instance=instance)
raise exception.AgentNotImplemented(method=method.__name__)
else:
LOG.error(_LE('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'),
LOG.error('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r',
{'method': method, 'args': args, 'e': e},
instance=instance)
raise exception.AgentError(method=method.__name__)
@ -102,15 +102,15 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
try:
ret = jsonutils.loads(ret)
except TypeError:
LOG.error(_LE('The agent call to %(method)s returned an invalid '
'response: %(ret)r. args=%(args)r'),
LOG.error('The agent call to %(method)s returned an invalid '
'response: %(ret)r. args=%(args)r',
{'method': method, 'ret': ret, 'args': args},
instance=instance)
raise exception.AgentError(method=method.__name__)
if ret['returncode'] not in success_codes:
LOG.error(_LE('The agent call to %(method)s returned '
'an error: %(ret)r. args=%(args)r'),
LOG.error('The agent call to %(method)s returned '
'an error: %(ret)r. args=%(args)r',
{'method': method, 'ret': ret, 'args': args},
instance=instance)
raise exception.AgentError(method=method.__name__)
@ -157,9 +157,8 @@ class XenAPIBasedAgent(object):
self.vm_ref = vm_ref
def _add_instance_fault(self, error, exc_info):
LOG.warning(_LW("Ignoring error while configuring instance with "
"agent: %s"), error,
instance=self.instance, exc_info=True)
LOG.warning("Ignoring error while configuring instance with agent: %s",
error, instance=self.instance, exc_info=True)
try:
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(
@ -234,9 +233,8 @@ class XenAPIBasedAgent(object):
self._call_agent(host_agent.agent_update, args)
except exception.AgentError as exc:
# Silently fail for agent upgrades
LOG.warning(_LW("Unable to update the agent due "
"to: %(exc)s"), dict(exc=exc),
instance=self.instance)
LOG.warning("Unable to update the agent due to: %(exc)s",
dict(exc=exc), instance=self.instance)
def _exchange_key_with_agent(self):
dh = SimpleDH()
@ -360,20 +358,19 @@ def find_guest_agent(base_dir):
# reconfigure the network from xenstore data,
# so manipulation of files in /etc is not
# required
LOG.info(_LI('XenServer tools installed in this '
'image are capable of network injection. '
'Networking files will not be'
'manipulated'))
LOG.info('XenServer tools installed in this '
'image are capable of network injection. '
'Networking files will not be'
'manipulated')
return True
xe_daemon_filename = os.path.join(base_dir,
'usr', 'sbin', 'xe-daemon')
if os.path.isfile(xe_daemon_filename):
LOG.info(_LI('XenServer tools are present '
'in this image but are not capable '
'of network injection'))
LOG.info('XenServer tools are present '
'in this image but are not capable '
'of network injection')
else:
LOG.info(_LI('XenServer tools are not '
'installed in this image'))
LOG.info('XenServer tools are not installed in this image')
return False
@ -386,8 +383,8 @@ def should_use_agent(instance):
try:
return strutils.bool_from_string(use_agent_raw, strict=True)
except ValueError:
LOG.warning(_LW("Invalid 'agent_present' value. "
"Falling back to the default."),
LOG.warning("Invalid 'agent_present' value. "
"Falling back to the default.",
instance=instance)
return CONF.xenserver.use_agent_default


@ -34,8 +34,8 @@ from oslo_utils import versionutils
import six.moves.urllib.parse as urlparse
import nova.conf
from nova.i18n import _, _LE, _LW
from nova import exception
from nova.i18n import _
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
@ -53,10 +53,10 @@ OVERHEAD_PER_VCPU = 1.5
def invalid_option(option_name, recommended_value):
LOG.exception(_LE('Current value of '
'CONF.xenserver.%(option)s option incompatible with '
'CONF.xenserver.independent_compute=True. '
'Consider using "%(recommended)s"'),
LOG.exception(_('Current value of '
'CONF.xenserver.%(option)s option incompatible with '
'CONF.xenserver.independent_compute=True. '
'Consider using "%(recommended)s"'),
{'option': option_name,
'recommended': recommended_value})
raise exception.NotSupportedWithOption(
@ -120,7 +120,7 @@ class XenAPIDriver(driver.ComputeDriver):
try:
vm_utils.cleanup_attached_vdis(self._session)
except Exception:
LOG.exception(_LE('Failure while cleaning up attached VDIs'))
LOG.exception(_('Failure while cleaning up attached VDIs'))
def instance_exists(self, instance):
"""Checks existence of an instance on the host.
@ -363,7 +363,7 @@ class XenAPIDriver(driver.ComputeDriver):
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warning(_LW('Could not determine key: %s'), err,
LOG.warning('Could not determine key: %s', err,
instance=instance)
self._initiator = None
return {


@ -30,7 +30,7 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
from nova.virt.xenapi import pool_states
@ -73,11 +73,11 @@ class Host(object):
name = vm_rec['name_label']
uuid = _uuid_find(ctxt, host, name)
if not uuid:
LOG.info(_LI('Instance %(name)s running on '
'%(host)s could not be found in '
'the database: assuming it is a '
'worker VM and skip ping migration '
'to a new host'),
LOG.info('Instance %(name)s running on '
'%(host)s could not be found in '
'the database: assuming it is a '
'worker VM and skip ping migration '
'to a new host',
{'name': name, 'host': host})
continue
instance = objects.Instance.get_by_uuid(ctxt, uuid)
@ -105,8 +105,8 @@ class Host(object):
break
except XenAPI.Failure:
LOG.exception(_LE('Unable to migrate VM %(vm_ref)s '
'from %(host)s'),
LOG.exception(_('Unable to migrate VM %(vm_ref)s '
'from %(host)s'),
{'vm_ref': vm_ref, 'host': host})
instance.host = host
instance.vm_state = vm_states.ACTIVE
@ -262,7 +262,7 @@ class HostState(object):
allocated += vdi_physical
physical_used += vdi_physical
except (ValueError, self._session.XenAPI.Failure):
LOG.exception(_LE('Unable to get size for vdi %s'), vdi_ref)
LOG.exception(_('Unable to get size for vdi %s'), vdi_ref)
return (allocated, physical_used)
@ -298,8 +298,8 @@ class HostState(object):
del data['host_memory']
if (data['host_hostname'] !=
self._stats.get('host_hostname', data['host_hostname'])):
LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. '
'A restart is required to take effect.'),
LOG.error('Hostname has changed from %(old)s to %(new)s. '
'A restart is required to take effect.',
{'old': self._stats['host_hostname'],
'new': data['host_hostname']})
data['host_hostname'] = self._stats['host_hostname']
@ -330,7 +330,7 @@ def to_supported_instances(host_capabilities):
result.append((guestarch, obj_fields.HVType.XEN, ostype))
except ValueError:
LOG.warning(_LW("Failed to extract instance support from %s"),
LOG.warning("Failed to extract instance support from %s",
capability)
return result
@ -401,11 +401,11 @@ def call_xenhost(session, method, arg_dict):
return ''
return jsonutils.loads(result)
except ValueError:
LOG.exception(_LE("Unable to get updated status"))
LOG.exception(_("Unable to get updated status"))
return None
except session.XenAPI.Failure as e:
LOG.error(_LE("The call to %(method)s returned "
"an error: %(e)s."), {'method': method, 'e': e})
LOG.error("The call to %(method)s returned "
"an error: %(e)s.", {'method': method, 'e': e})
return e.details[1]
@ -421,11 +421,11 @@ def _call_host_management(session, method, *args):
return ''
return jsonutils.loads(result)
except ValueError:
LOG.exception(_LE("Unable to get updated status"))
LOG.exception(_("Unable to get updated status"))
return None
except session.XenAPI.Failure as e:
LOG.error(_LE("The call to %(method)s returned "
"an error: %(e)s."), {'method': method.__name__, 'e': e})
LOG.error("The call to %(method)s returned an error: %(e)s.",
{'method': method.__name__, 'e': e})
return e.details[1]


@ -25,7 +25,7 @@ import six.moves.urllib.parse as urlparse
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _, _LE
from nova.i18n import _
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
@ -54,8 +54,8 @@ class ResourcePool(object):
aggregate.update_metadata(metadata)
op(host)
except Exception:
LOG.exception(_LE('Aggregate %(aggregate_id)s: unrecoverable '
'state during operation on %(host)s'),
LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable '
'state during operation on %(host)s'),
{'aggregate_id': aggregate.id, 'host': host})
def add_to_aggregate(self, context, aggregate, host, slave_info=None):
@ -171,7 +171,7 @@ class ResourcePool(object):
'master_pass': CONF.xenserver.connection_password, }
self._session.call_plugin('xenhost.py', 'host_join', args)
except self._session.XenAPI.Failure as e:
LOG.error(_LE("Pool-Join failed: %s"), e)
LOG.error("Pool-Join failed: %s", e)
raise exception.AggregateError(aggregate_id=aggregate_id,
action='add_to_aggregate',
reason=_('Unable to join %s '
@ -190,7 +190,7 @@ class ResourcePool(object):
host_ref = self._session.host.get_by_uuid(host_uuid)
self._session.pool.eject(host_ref)
except self._session.XenAPI.Failure as e:
LOG.error(_LE("Pool-eject failed: %s"), e)
LOG.error("Pool-eject failed: %s", e)
raise exception.AggregateError(aggregate_id=aggregate_id,
action='remove_from_aggregate',
reason=six.text_type(e.details))
@ -201,7 +201,7 @@ class ResourcePool(object):
pool_ref = self._session.pool.get_all()[0]
self._session.pool.set_name_label(pool_ref, aggregate_name)
except self._session.XenAPI.Failure as e:
LOG.error(_LE("Unable to set up pool: %s."), e)
LOG.error("Unable to set up pool: %s.", e)
raise exception.AggregateError(aggregate_id=aggregate_id,
action='add_to_aggregate',
reason=six.text_type(e.details))
@ -212,7 +212,7 @@ class ResourcePool(object):
pool_ref = self._session.pool.get_all()[0]
self._session.pool.set_name_label(pool_ref, '')
except self._session.XenAPI.Failure as e:
LOG.error(_LE("Pool-set_name_label failed: %s"), e)
LOG.error("Pool-set_name_label failed: %s", e)
raise exception.AggregateError(aggregate_id=aggregate_id,
action='remove_from_aggregate',
reason=six.text_type(e.details))
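
These pool.py hunks also show the split the commit enforces: operator-facing log calls lose their translation markers, while exception reasons surfaced to API users keep _(). A hedged sketch of that split, reusing names that appear in the hunks above; the wrapper function itself is illustrative:

    from nova import exception
    from nova.i18n import _
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def eject_from_pool(session, aggregate_id, host_ref):
        try:
            session.pool.eject(host_ref)
        except session.XenAPI.Failure as e:
            # Operator-facing log line: plain English, searchable as-is.
            LOG.error("Pool-eject failed: %s", e)
            # User-facing error message: still wrapped in _() for translation.
            raise exception.AggregateError(
                aggregate_id=aggregate_id,
                action='remove_from_aggregate',
                reason=_('Unable to eject host from pool'))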

View File

@ -24,7 +24,6 @@ from nova.compute import power_state
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.network import model as network_model
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
@ -56,8 +55,8 @@ class XenVIFDriver(object):
try:
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
except Exception as e:
LOG.warning(_LW("Failed to create vif, exception:%(exception)s, "
"vif:%(vif)s"), {'exception': e, 'vif': vif})
LOG.warning("Failed to create vif, exception:%(exception)s, "
"vif:%(vif)s", {'exception': e, 'vif': vif})
raise exception.NovaException(
reason=_("Failed to create vif %s") % vif)
@ -79,7 +78,7 @@ class XenVIFDriver(object):
self._session.call_xenapi('VIF.destroy', vif_ref)
except Exception as e:
LOG.warning(
_LW("Fail to unplug vif:%(vif)s, exception:%(exception)s"),
"Fail to unplug vif:%(vif)s, exception:%(exception)s",
{'vif': vif, 'exception': e}, instance=instance)
raise exception.NovaException(
reason=_("Failed to unplug vif %s") % vif)
@ -324,8 +323,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# delete the patch port pair
host_network.ovs_del_port(self._session, bridge_name, patch_port1)
except Exception as e:
LOG.warning(_LW("Failed to delete patch port pair for vif %(if)s,"
" exception:%(exception)s"),
LOG.warning("Failed to delete patch port pair for vif %(if)s,"
" exception:%(exception)s",
{'if': vif, 'exception': e}, instance=instance)
raise exception.VirtualInterfaceUnplugException(
reason=_("Failed to delete patch port pair"))
@ -356,8 +355,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
CONF.xenserver.ovs_integration_bridge,
qvo_name)
except Exception as e:
LOG.warning(_LW("Failed to delete bridge for vif %(if)s, "
"exception:%(exception)s"),
LOG.warning("Failed to delete bridge for vif %(if)s, "
"exception:%(exception)s",
{'if': vif, 'exception': e}, instance=instance)
raise exception.VirtualInterfaceUnplugException(
reason=_("Failed to delete bridge"))
@ -507,8 +506,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
try:
network_ref = self._session.network.create(network_rec)
except Exception as e:
LOG.warning(_LW("Failed to create interim network for vif %(if)s, "
"exception:%(exception)s"),
LOG.warning("Failed to create interim network for vif %(if)s, "
"exception:%(exception)s",
{'if': vif, 'exception': e})
raise exception.VirtualInterfacePlugException(
_("Failed to create the interim network for vif"))

View File

@ -49,7 +49,7 @@ from nova.compute import power_state
from nova.compute import task_states
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova.network import model as network_model
from nova.objects import diagnostics
from nova.objects import fields as obj_fields
@ -263,7 +263,7 @@ def destroy_vm(session, instance, vm_ref):
try:
session.VM.destroy(vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Destroy VM failed'))
LOG.exception(_('Destroy VM failed'))
return
LOG.debug("VM destroyed", instance=instance)
@ -271,7 +271,7 @@ def destroy_vm(session, instance, vm_ref):
def clean_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
LOG.warning("VM already halted, skipping shutdown...",
instance=instance)
return True
@ -279,14 +279,14 @@ def clean_shutdown_vm(session, instance, vm_ref):
try:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (cleanly) failed.'))
LOG.exception(_('Shutting down VM (cleanly) failed.'))
return False
return True
def hard_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
LOG.warning("VM already halted, skipping shutdown...",
instance=instance)
return True
@ -294,7 +294,7 @@ def hard_shutdown_vm(session, instance, vm_ref):
try:
session.call_xenapi('VM.hard_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (hard) failed'))
LOG.exception(_('Shutting down VM (hard) failed'))
return False
return True
@ -339,15 +339,15 @@ def unplug_vbd(session, vbd_ref, this_vm_ref):
except session.XenAPI.Failure as exc:
err = len(exc.details) > 0 and exc.details[0]
if err == 'DEVICE_ALREADY_DETACHED':
LOG.info(_LI('VBD %s already detached'), vbd_ref)
LOG.info('VBD %s already detached', vbd_ref)
return
elif _should_retry_unplug_vbd(err):
LOG.info(_LI('VBD %(vbd_ref)s unplug failed with "%(err)s", '
'attempt %(num_attempt)d/%(max_attempts)d'),
LOG.info('VBD %(vbd_ref)s unplug failed with "%(err)s", '
'attempt %(num_attempt)d/%(max_attempts)d',
{'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
'max_attempts': max_attempts, 'err': err})
else:
LOG.exception(_LE('Unable to unplug VBD'))
LOG.exception(_('Unable to unplug VBD'))
raise exception.StorageError(
reason=_('Unable to unplug VBD %s') % vbd_ref)
@ -362,7 +362,7 @@ def destroy_vbd(session, vbd_ref):
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to destroy VBD'))
LOG.exception(_('Unable to destroy VBD'))
raise exception.StorageError(
reason=_('Unable to destroy VBD %s') % vbd_ref)
@ -626,8 +626,7 @@ def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
# ensure garbage collector has been run
_scan_sr(session, sr_ref)
LOG.info(_LI("Deleted %s snapshots."), number_of_snapshots,
instance=instance)
LOG.info("Deleted %s snapshots.", number_of_snapshots, instance=instance)
def remove_old_snapshots(session, instance, vm_ref):
@ -788,7 +787,7 @@ def _find_cached_image(session, image_id, sr_ref):
number_found = len(recs)
if number_found > 0:
if number_found > 1:
LOG.warning(_LW("Multiple base images for image: %s"), image_id)
LOG.warning("Multiple base images for image: %s", image_id)
return list(recs.keys())[0]
@ -934,8 +933,7 @@ def try_auto_configure_disk(session, vdi_ref, new_gb):
try:
_auto_configure_disk(session, vdi_ref, new_gb)
except exception.CannotResizeDisk as e:
msg = _LW('Attempted auto_configure_disk failed because: %s')
LOG.warning(msg, e)
LOG.warning('Attempted auto_configure_disk failed because: %s', e)
def _make_partition(session, dev, partition_start, partition_end):
@ -1204,9 +1202,9 @@ def _create_cached_image(context, session, instance, name_label,
sr_type = session.call_xenapi('SR.get_type', sr_ref)
if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_LW("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %s. Ignoring the cow flag."), sr_type)
LOG.warning("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %s. Ignoring the cow flag.", sr_type)
@utils.synchronized('xenapi-image-cache' + image_id)
def _create_cached_image_impl(context, session, instance, name_label,
@ -1279,8 +1277,8 @@ def create_image(context, session, instance, name_label, image_id,
elif cache_images == 'none':
cache = False
else:
LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to"
" True"), CONF.xenserver.cache_images)
LOG.warning("Unrecognized cache_images value '%s', defaulting to True",
CONF.xenserver.cache_images)
cache = True
# Fetch (and cache) the image
@ -1295,9 +1293,9 @@ def create_image(context, session, instance, name_label, image_id,
downloaded = True
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
LOG.info(_LI("Image creation data, cacheable: %(cache)s, "
"downloaded: %(downloaded)s duration: %(duration).2f secs "
"for image %(image_id)s"),
LOG.info("Image creation data, cacheable: %(cache)s, "
"downloaded: %(downloaded)s duration: %(duration).2f secs "
"for image %(image_id)s",
{'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
'duration': duration})
@ -1352,8 +1350,7 @@ def _default_download_handler():
def get_compression_level():
level = CONF.xenserver.image_compression_level
if level is not None and (level < 1 or level > 9):
LOG.warning(_LW("Invalid value '%d' for image_compression_level"),
level)
LOG.warning("Invalid value '%d' for image_compression_level", level)
return None
return level
@ -1420,8 +1417,8 @@ def _check_vdi_size(context, session, instance, vdi_uuid):
size = _get_vdi_chain_size(session, vdi_uuid)
if size > allowed_size:
LOG.error(_LE("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d"),
LOG.error("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d",
{'size': size, 'allowed_size': allowed_size},
instance=instance)
@ -1512,8 +1509,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures.
LOG.exception(_LE("Failed to fetch glance image"),
instance=instance)
LOG.exception(_("Failed to fetch glance image"), instance=instance)
e.args = e.args + ([dict(type=ImageType.to_string(image_type),
uuid=vdi_uuid,
file=filename)],)
@ -1608,7 +1604,7 @@ def lookup_vm_vdis(session, vm_ref):
# This is not an attached volume
vdi_refs.append(vdi_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('"Look for the VDIs failed'))
LOG.exception(_('"Look for the VDIs failed'))
return vdi_refs
@ -1796,7 +1792,7 @@ def compile_diagnostics(vm_rec):
return diags
except expat.ExpatError as e:
LOG.exception(_LE('Unable to parse rrd of %s'), e)
LOG.exception(_('Unable to parse rrd of %s'), e)
return {"Unable to retrieve diagnostics": e}
@ -1826,8 +1822,8 @@ def _scan_sr(session, sr_ref=None, max_attempts=4):
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
if attempt < max_attempts:
ctxt.reraise = False
LOG.warning(_LW("Retry SR scan due to error: "
"%s"), exc)
LOG.warning("Retry SR scan due to error: %s",
exc)
greenthread.sleep(2 ** attempt)
attempt += 1
do_scan(sr_ref)
@ -1859,8 +1855,8 @@ def _find_sr(session):
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect "
"formatting convention"),
LOG.warning("Flag sr_matching_filter '%s' does not respect "
"formatting convention",
CONF.xenserver.sr_matching_filter)
return None
@ -1880,10 +1876,10 @@ def _find_sr(session):
if sr_ref:
return sr_ref
# No SR found!
LOG.error(_LE("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."))
LOG.error("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'.")
return None
@ -1946,8 +1942,8 @@ def _get_rrd(server, vm_uuid):
vm_uuid))
return xml.read()
except IOError:
LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'),
LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'),
{'vm_uuid': vm_uuid, 'server': server})
return None
@ -2161,7 +2157,7 @@ def cleanup_attached_vdis(session):
if 'nova_instance_uuid' in vdi_rec['other_config']:
# Belongs to an instance and probably left over after an
# unclean restart
LOG.info(_LI('Disconnecting stale VDI %s from compute domU'),
LOG.info('Disconnecting stale VDI %s from compute domU',
vdi_rec['uuid'])
unplug_vbd(session, vbd_ref, this_vm_ref)
destroy_vbd(session, vbd_ref)
@ -2224,12 +2220,11 @@ def _get_dom0_ref(session):
def get_this_vm_uuid(session):
if CONF.xenserver.independent_compute:
msg = _LE("This host has been configured with the independent "
LOG.error("This host has been configured with the independent "
"compute flag. An operation has been attempted which is "
"incompatible with this flag, but should have been "
"caught earlier. Please raise a bug against the "
"OpenStack Nova project")
LOG.error(msg)
raise exception.NotSupportedWithOption(
operation='uncaught operation',
option='CONF.xenserver.independent_compute')
@ -2484,7 +2479,7 @@ def _mounted_processing(device, key, net, metadata):
vfs = vfsimpl.VFSLocalFS(
imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW),
imgdir=tmpdir)
LOG.info(_LI('Manipulating interface files directly'))
LOG.info('Manipulating interface files directly')
# for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we
# support injecting arbitrary files here.
@ -2493,8 +2488,8 @@ def _mounted_processing(device, key, net, metadata):
finally:
utils.execute('umount', dev_path, run_as_root=True)
else:
LOG.info(_LI('Failed to mount filesystem (expected for '
'non-linux instances): %s'), err)
LOG.info('Failed to mount filesystem (expected for '
'non-linux instances): %s', err)
def ensure_correct_host(session):
@ -2607,14 +2602,14 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
"""
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
if not boot_menu_url:
LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...'), instance=instance)
LOG.warning('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...', instance=instance)
return
network_name = CONF.xenserver.ipxe_network_name
if not network_name:
LOG.warning(_LW('ipxe_network_name not set, user will have to'
' enter IP manually...'), instance=instance)
LOG.warning('ipxe_network_name not set, user will have to'
' enter IP manually...', instance=instance)
return
network = None
@ -2624,8 +2619,8 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
break
if not network:
LOG.warning(_LW("Unable to find network matching '%(network_name)s', "
"user will have to enter IP manually..."),
LOG.warning("Unable to find network matching '%(network_name)s', "
"user will have to enter IP manually...",
{'network_name': network_name}, instance=instance)
return
@ -2649,7 +2644,7 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'CommandNotFound':
LOG.warning(_LW("ISO creation tool '%s' does not exist."),
LOG.warning("ISO creation tool '%s' does not exist.",
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
else:
raise

View File

@ -47,7 +47,7 @@ import nova.conf
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import manager as pci_manager
@ -452,8 +452,8 @@ class VMOps(object):
vm_utils.handle_ipxe_iso(
self._session, instance, vdis['iso'], network_info)
else:
LOG.warning(_LW('ipxe_boot is True but no ISO image '
'found'), instance=instance)
LOG.warning('ipxe_boot is True but no ISO image found',
instance=instance)
if resize:
self._resize_up_vdis(instance, vdis)
@ -620,7 +620,7 @@ class VMOps(object):
def _handle_neutron_event_timeout(self, instance, undo_mgr):
# We didn't get callback from Neutron within given time
LOG.warning(_LW('Timeout waiting for vif plugging callback'),
LOG.warning('Timeout waiting for vif plugging callback',
instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
@ -633,8 +633,8 @@ class VMOps(object):
self._update_last_dom_id(vm_ref)
def _neutron_failed_callback(self, event_name, instance):
LOG.warning(_LW('Neutron Reported failure on event %(event)s'),
{'event': event_name}, instance=instance)
LOG.warning('Neutron Reported failure on event %(event)s',
{'event': event_name}, instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
@ -1025,9 +1025,8 @@ class VMOps(object):
undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception as error:
LOG.exception(_LE("_migrate_disk_resizing_down failed. "
"Restoring orig vm"),
instance=instance)
LOG.exception(_("_migrate_disk_resizing_down failed. Restoring"
"orig vm"), instance=instance)
undo_mgr._rollback()
raise exception.InstanceFaultRollback(error)
@ -1201,15 +1200,15 @@ class VMOps(object):
transfer_ephemeral_disks_then_all_leaf_vdis()
except Exception as error:
LOG.exception(_LE("_migrate_disk_resizing_up failed. "
"Restoring orig vm due_to: %s."), error,
instance=instance)
LOG.exception(_("_migrate_disk_resizing_up failed. "
"Restoring orig vm due_to: %s."),
error, instance=instance)
try:
self._restore_orig_vm_and_cleanup_orphan(instance)
# TODO(johngarbutt) should also cleanup VHDs at destination
except Exception as rollback_error:
LOG.warning(_LW("_migrate_disk_resizing_up failed to "
"rollback: %s"), rollback_error,
LOG.warning("_migrate_disk_resizing_up failed to "
"rollback: %s", rollback_error,
instance=instance)
raise exception.InstanceFaultRollback(error)
@ -1336,14 +1335,14 @@ class VMOps(object):
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_LI("Starting halted instance found during reboot"),
LOG.info("Starting halted instance found during reboot",
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warning(_LW("Reboot failed due to bad volumes, detaching "
"bad volumes and starting halted instance"),
LOG.warning("Reboot failed due to bad volumes, detaching "
"bad volumes and starting halted instance",
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
@ -1420,7 +1419,7 @@ class VMOps(object):
# Skip the update when not possible, as the updated metadata will
# get added when the VM is being booted up at the end of the
# resize or rebuild.
LOG.warning(_LW("Unable to update metadata, VM not found."),
LOG.warning("Unable to update metadata, VM not found.",
instance=instance, exc_info=True)
return
@ -1540,7 +1539,7 @@ class VMOps(object):
destroy_* methods are internal.
"""
LOG.info(_LI("Destroying VM"), instance=instance)
LOG.info("Destroying VM", instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
@ -1572,7 +1571,7 @@ class VMOps(object):
"""
if vm_ref is None:
LOG.warning(_LW("VM is not present, skipping destroy..."),
LOG.warning("VM is not present, skipping destroy...",
instance=instance)
# NOTE(alaski): There should not be a block device mapping here,
# but if there is it very likely means there was an error cleaning
@ -1593,24 +1592,24 @@ class VMOps(object):
sr_uuid)
if not sr_ref:
connection_data = bdm['connection_info']['data']
(sr_uuid, _, _) = volume_utils.parse_sr_info(
(sr_uuid, unused, unused) = volume_utils.parse_sr_info(
connection_data)
sr_ref = volume_utils.find_sr_by_uuid(self._session,
sr_uuid)
except Exception:
LOG.exception(_LE('Failed to find an SR for volume %s'),
LOG.exception(_('Failed to find an SR for volume %s'),
volume_id, instance=instance)
try:
if sr_ref:
volume_utils.forget_sr(self._session, sr_ref)
else:
LOG.error(_LE('Volume %s is associated with the '
'instance but no SR was found for it'), volume_id,
instance=instance)
LOG.error('Volume %s is associated with the '
'instance but no SR was found for it',
volume_id, instance=instance)
except Exception:
LOG.exception(_LE('Failed to forget the SR for volume %s'),
volume_id, instance=instance)
LOG.exception(_('Failed to forget the SR for volume %s'),
volume_id, instance=instance)
return
# NOTE(alaski): Attempt clean shutdown first if there's an attached
@ -1709,7 +1708,7 @@ class VMOps(object):
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_LW("VM is not present, skipping soft delete..."),
LOG.warning("VM is not present, skipping soft delete...",
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
@ -1758,11 +1757,11 @@ class VMOps(object):
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_LI("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds"), instances_info)
LOG.info("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds", instances_info)
for instance in instances:
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
LOG.info("Automatically hard rebooting", instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
@ -1818,7 +1817,7 @@ class VMOps(object):
raw_console_data = vm_management.get_console_log(
self._session, dom_id)
except self._session.XenAPI.Failure:
LOG.exception(_LE("Guest does not have a console available"))
LOG.exception(_("Guest does not have a console available"))
raise exception.ConsoleNotAvailable()
return zlib.decompress(base64.b64decode(raw_console_data))
@ -2048,15 +2047,15 @@ class VMOps(object):
def _process_plugin_exception(self, plugin_exception, method, instance):
err_msg = plugin_exception.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_LE('TIMEOUT: The call to %s timed out'),
LOG.error('TIMEOUT: The call to %s timed out',
method, instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_LE('NOT IMPLEMENTED: The call to %s is not supported'
' by the agent.'), method, instance=instance)
LOG.error('NOT IMPLEMENTED: The call to %s is not supported'
' by the agent.', method, instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_LE('The call to %(method)s returned an error: %(e)s.'),
LOG.error('The call to %(method)s returned an error: %(e)s.',
{'method': method, 'e': plugin_exception},
instance=instance)
return {'returncode': 'error', 'message': err_msg}
@ -2156,7 +2155,7 @@ class VMOps(object):
nwref,
options)
except self._session.XenAPI.Failure:
LOG.exception(_LE('Migrate Receive failed'))
LOG.exception(_('Migrate Receive failed'))
msg = _('Migrate Receive failed')
raise exception.MigrationPreCheckError(reason=msg)
return migrate_data
@ -2434,7 +2433,7 @@ class VMOps(object):
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure:
LOG.exception(_LE('Migrate Send failed'))
LOG.exception(_('Migrate Send failed'))
raise exception.MigrationError(
reason=_('Migrate Send failed'))
@ -2491,7 +2490,7 @@ class VMOps(object):
if sr_ref:
volume_utils.forget_sr(self._session, sr_ref)
except Exception:
LOG.exception(_LE('Failed to forget the SR for volume %s'),
LOG.exception(_('Failed to forget the SR for volume %s'),
params['id'], instance=instance)
# delete VIF and network in destination host
@ -2505,8 +2504,8 @@ class VMOps(object):
try:
self.vif_driver.delete_network_and_bridge(instance, vif)
except Exception:
LOG.exception(_LE('Failed to delete networks and bridges with '
'VIF %s'), vif['id'], instance=instance)
LOG.exception(_('Failed to delete networks and bridges with '
'VIF %s'), vif['id'], instance=instance)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
@ -2570,13 +2569,13 @@ class VMOps(object):
self.firewall_driver.setup_basic_filtering(instance, [vif])
except exception.NovaException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('attach network interface %s failed.'),
LOG.exception(_('attach network interface %s failed.'),
vif['id'], instance=instance)
try:
self.vif_driver.unplug(instance, vif, vm_ref)
except exception.NovaException:
# if unplug failed, no need to raise exception
LOG.warning(_LW('Unplug VIF %s failed.'),
LOG.warning('Unplug VIF %s failed.',
vif['id'], instance=instance)
_attach_interface(instance, vm_ref, vif)
@ -2589,5 +2588,5 @@ class VMOps(object):
self.vif_driver.unplug(instance, vif, vm_ref)
except exception.NovaException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('detach network interface %s failed.'),
LOG.exception(_('detach network interface %s failed.'),
vif['id'], instance=instance)
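
One smaller change in the vmops.py hunks above renames the throwaway tuple members from _ to unused when unpacking parse_sr_info(); presumably because LOG.exception in the same block now calls _() directly, so rebinding _ to a string would break it. A hedged sketch of the idea, using an illustrative stand-in for the real helper:

    from nova.i18n import _


    def parse_sr_info(connection_data):
        # Illustrative stand-in; the real volume_utils.parse_sr_info()
        # derives (sr_uuid, label, params) from the connection data.
        return connection_data['sr_uuid'], 'label', 'params'


    connection_data = {'sr_uuid': 'some-sr-uuid'}

    # Before: (sr_uuid, _, _) = parse_sr_info(connection_data)
    # rebound _ locally, shadowing the imported translation function.
    (sr_uuid, unused, unused) = parse_sr_info(connection_data)

    # After the rename, _() remains callable for user-facing messages.
    message = _('Failed to find an SR for volume %s') % sr_uuid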

View File

@ -30,8 +30,7 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LW
from nova.i18n import _
CONF = nova.conf.CONF
@ -176,7 +175,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
session.call_xenapi("SR.scan", sr_ref)
vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to introduce VDI on SR'))
LOG.exception(_('Unable to introduce VDI on SR'))
raise exception.StorageError(
reason=_('Unable to introduce VDI on SR %s') % sr_ref)
@ -191,7 +190,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
LOG.debug(vdi_rec)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to get record of VDI'))
LOG.exception(_('Unable to get record of VDI'))
raise exception.StorageError(
reason=_('Unable to get record of VDI %s on') % vdi_ref)
@ -213,7 +212,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to introduce VDI for SR'))
LOG.exception(_('Unable to introduce VDI for SR'))
raise exception.StorageError(
reason=_('Unable to introduce VDI for SR %s') % sr_ref)
@ -242,7 +241,7 @@ def purge_sr(session, sr_ref):
for vdi_ref in vdi_refs:
vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
if vbd_refs:
LOG.warning(_LW('Cannot purge SR with referenced VDIs'))
LOG.warning('Cannot purge SR with referenced VDIs')
return
forget_sr(session, sr_ref)
@ -259,16 +258,16 @@ def _unplug_pbds(session, sr_ref):
try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
except session.XenAPI.Failure as exc:
LOG.warning(_LW('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref})
LOG.warning('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s', {'exc': exc, 'sr_ref': sr_ref})
return
for pbd in pbds:
try:
session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure as exc:
LOG.warning(_LW('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd})
LOG.warning('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s', {'exc': exc, 'pbd': pbd})
def get_device_number(mountpoint):
@ -291,7 +290,7 @@ def _mountpoint_to_number(mountpoint):
elif re.match('^[0-9]+$', mountpoint):
return int(mountpoint, 10)
else:
LOG.warning(_LW('Mountpoint cannot be translated: %s'), mountpoint)
LOG.warning('Mountpoint cannot be translated: %s', mountpoint)
return -1
@ -311,7 +310,7 @@ def find_sr_from_vbd(session, vbd_ref):
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to find SR from VBD'))
LOG.exception(_('Unable to find SR from VBD'))
raise exception.StorageError(
reason=_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
@ -322,7 +321,7 @@ def find_sr_from_vdi(session, vdi_ref):
try:
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to find SR from VDI'))
LOG.exception(_('Unable to find SR from VDI'))
raise exception.StorageError(
reason=_('Unable to find SR from VDI %s') % vdi_ref)
return sr_ref
@ -393,6 +392,5 @@ def stream_to_vdi(session, instance, disk_format,
_stream_to_vdi(conn, vdi_import_path, file_size, file_obj)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Streaming disk to VDI failed '
'with error: %s'),
LOG.error('Streaming disk to VDI failed with error: %s',
e, instance=instance)

View File

@ -22,7 +22,6 @@ from oslo_utils import excutils
from oslo_utils import strutils
from nova import exception
from nova.i18n import _LI, _LW
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
@ -59,7 +58,7 @@ class VolumeOps(object):
vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
connection_data)
vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)
LOG.info('Connected volume (vdi_uuid): %s', vdi_uuid)
if vm_ref:
self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
@ -127,8 +126,8 @@ class VolumeOps(object):
LOG.debug("Plugging VBD: %s", vbd_ref)
self._session.VBD.plug(vbd_ref, vm_ref)
LOG.info(_LI('Dev %(dev_number)s attached to'
' instance %(instance_name)s'),
LOG.info('Dev %(dev_number)s attached to'
' instance %(instance_name)s',
{'instance_name': instance_name, 'dev_number': dev_number})
def detach_volume(self, connection_info, instance_name, mountpoint):
@ -145,12 +144,12 @@ class VolumeOps(object):
if vbd_ref is None:
# NOTE(sirp): If we don't find the VBD then it must have been
# detached previously.
LOG.warning(_LW('Skipping detach because VBD for %s was '
'not found'), instance_name)
LOG.warning('Skipping detach because VBD for %s was not found',
instance_name)
else:
self._detach_vbds_and_srs(vm_ref, [vbd_ref])
LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
' %(instance_name)s'),
LOG.info('Mountpoint %(mountpoint)s detached from instance'
' %(instance_name)s',
{'instance_name': instance_name,
'mountpoint': mountpoint})