Replace `_` with `_LW` in all LOG.warning calls, part 4
oslo.i18n uses different marker functions to separate the translatable messages into different catalogs, which the translation teams can prioritize translating. For details, please refer to: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#guidelines-for-use-in-openstack Marker functions were missing in some places in the network directory. This commit makes the following changes: * Add missing marker functions * Use ',' instead of '%' when adding variables to log messages Change-Id: I913077d3b0fdee78e423c35b3a48137a17946a7b
This commit is contained in:
parent
1e8df2f00b
commit
76953c00c3
|
@ -309,7 +309,7 @@ def validate_log_translations(logical_line, physical_line, filename):
|
|||
"plugins/xenserver/xenapi/etc/xapi.d" in filename or
|
||||
# TODO(Mike_D):Needs to be remove with:
|
||||
# I075ab2a522272f2082c292dfedc877abd8ebe328
|
||||
"nova/virt" in filename):
|
||||
"nova/virt/libvirt" in filename):
|
||||
return
|
||||
if pep8.noqa(physical_line):
|
||||
return
|
||||
|
|
|
@ -175,10 +175,10 @@ def required_by(instance):
|
|||
image_prop = utils.instance_sys_meta(instance).get(
|
||||
utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive', 'optional')
|
||||
if image_prop not in ['optional', 'mandatory']:
|
||||
LOG.warn(_LW('Image config drive option %(image_prop)s is invalid '
|
||||
'and will be ignored') %
|
||||
{'image_prop': image_prop},
|
||||
instance=instance)
|
||||
LOG.warning(_LW('Image config drive option %(image_prop)s is invalid '
|
||||
'and will be ignored'),
|
||||
{'image_prop': image_prop},
|
||||
instance=instance)
|
||||
|
||||
return (instance.get('config_drive') or
|
||||
'always' == CONF.force_config_drive or
|
||||
|
|
|
@ -363,8 +363,8 @@ def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
|
|||
inject_val = locals()[inject]
|
||||
if inject_val:
|
||||
raise
|
||||
LOG.warn(_LW('Ignoring error injecting data into image %(image)s '
|
||||
'(%(e)s)'), {'image': image, 'e': e})
|
||||
LOG.warning(_LW('Ignoring error injecting data into image %(image)s '
|
||||
'(%(e)s)'), {'image': image, 'e': e})
|
||||
return False
|
||||
|
||||
try:
|
||||
|
@ -453,8 +453,8 @@ def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
|
|||
except Exception as e:
|
||||
if inject in mandatory:
|
||||
raise
|
||||
LOG.warn(_LW('Ignoring error injecting %(inject)s into image '
|
||||
'(%(e)s)'), {'inject': inject, 'e': e})
|
||||
LOG.warning(_LW('Ignoring error injecting %(inject)s into '
|
||||
'image (%(e)s)'), {'inject': inject, 'e': e})
|
||||
status = False
|
||||
return status
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ import time
|
|||
|
||||
from oslo.utils import importutils
|
||||
|
||||
from nova.i18n import _, _LI
|
||||
from nova.i18n import _, _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
||||
|
@ -121,7 +121,8 @@ class Mount(object):
|
|||
LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
|
||||
time.sleep(2)
|
||||
if time.time() - start_time > MAX_DEVICE_WAIT:
|
||||
LOG.warn(_('Device allocation failed after repeated retries.'))
|
||||
LOG.warning(_LW('Device allocation failed after repeated '
|
||||
'retries.'))
|
||||
return False
|
||||
device = self._inner_get_dev()
|
||||
return True
|
||||
|
|
|
@ -20,7 +20,7 @@ import time
|
|||
|
||||
from oslo.config import cfg
|
||||
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt.disk.mount import api
|
||||
|
@ -56,7 +56,7 @@ class NbdMount(api.Mount):
|
|||
else:
|
||||
LOG.error(_LE('NBD error - previous umount did not '
|
||||
'cleanup /var/lock/qemu-nbd-%s.'), device)
|
||||
LOG.warn(_('No free nbd devices'))
|
||||
LOG.warning(_LW('No free nbd devices'))
|
||||
return None
|
||||
|
||||
def _allocate_nbd(self):
|
||||
|
@ -107,8 +107,8 @@ class NbdMount(api.Mount):
|
|||
_out, err = utils.trycmd('qemu-nbd', '-d', device,
|
||||
run_as_root=True)
|
||||
if err:
|
||||
LOG.warn(_('Detaching from erroneous nbd device returned '
|
||||
'error: %s'), err)
|
||||
LOG.warning(_LW('Detaching from erroneous nbd device returned '
|
||||
'error: %s'), err)
|
||||
return False
|
||||
|
||||
self.error = ''
|
||||
|
|
|
@ -189,8 +189,8 @@ class VFSGuestFS(vfs.VFS):
|
|||
except AttributeError as ex:
|
||||
# set_backend_settings method doesn't exist in older
|
||||
# libguestfs versions, so nothing we can do but ignore
|
||||
LOG.warn(_LW("Unable to force TCG mode, libguestfs too old? %s"),
|
||||
ex)
|
||||
LOG.warning(_LW("Unable to force TCG mode, "
|
||||
"libguestfs too old? %s"), ex)
|
||||
pass
|
||||
|
||||
try:
|
||||
|
@ -222,7 +222,7 @@ class VFSGuestFS(vfs.VFS):
|
|||
try:
|
||||
self.handle.aug_close()
|
||||
except RuntimeError as e:
|
||||
LOG.warn(_("Failed to close augeas %s"), e)
|
||||
LOG.warning(_LW("Failed to close augeas %s"), e)
|
||||
|
||||
try:
|
||||
self.handle.shutdown()
|
||||
|
@ -230,7 +230,7 @@ class VFSGuestFS(vfs.VFS):
|
|||
# Older libguestfs versions haven't an explicit shutdown
|
||||
pass
|
||||
except RuntimeError as e:
|
||||
LOG.warn(_("Failed to shutdown appliance %s"), e)
|
||||
LOG.warning(_LW("Failed to shutdown appliance %s"), e)
|
||||
|
||||
try:
|
||||
self.handle.close()
|
||||
|
@ -238,7 +238,7 @@ class VFSGuestFS(vfs.VFS):
|
|||
# Older libguestfs versions haven't an explicit close
|
||||
pass
|
||||
except RuntimeError as e:
|
||||
LOG.warn(_("Failed to close guest handle %s"), e)
|
||||
LOG.warning(_LW("Failed to close guest handle %s"), e)
|
||||
finally:
|
||||
# dereference object and implicitly close()
|
||||
self.handle = None
|
||||
|
|
|
@ -36,7 +36,7 @@ from nova.compute import vm_mode
|
|||
from nova.console import type as ctype
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt import diagnostics
|
||||
|
@ -229,7 +229,7 @@ class FakeDriver(driver.ComputeDriver):
|
|||
if key in self.instances:
|
||||
del self.instances[key]
|
||||
else:
|
||||
LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
|
||||
LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
|
||||
{'key': key,
|
||||
'inst': self.instances}, instance=instance)
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ import os
|
|||
from oslo.config import cfg
|
||||
|
||||
from nova.compute import task_states
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LW
|
||||
from nova.image import glance
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt.hyperv import utilsfactory
|
||||
|
@ -116,8 +116,8 @@ class SnapshotOps(object):
|
|||
self._vmutils.remove_vm_snapshot(snapshot_path)
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
LOG.warning(_('Failed to remove snapshot for VM %s')
|
||||
% instance_name)
|
||||
LOG.warning(_LW('Failed to remove snapshot for VM %s'),
|
||||
instance_name)
|
||||
if export_dir:
|
||||
LOG.debug('Removing directory: %s', export_dir)
|
||||
self._pathutils.rmtree(export_dir)
|
||||
|
|
|
@ -441,9 +441,11 @@ class VMUtils(object):
|
|||
disk_found = True
|
||||
break
|
||||
if not disk_found:
|
||||
LOG.warn(_LW('Disk not found on controller "%(controller_path)s" '
|
||||
'with address "%(address)s"'),
|
||||
{'controller_path': controller_path, 'address': address})
|
||||
LOG.warning(_LW('Disk not found on controller '
|
||||
'"%(controller_path)s" with '
|
||||
'address "%(address)s"'),
|
||||
{'controller_path': controller_path,
|
||||
'address': address})
|
||||
|
||||
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
|
||||
nic_data = self._get_nic_data_by_name(nic_name)
|
||||
|
|
|
@ -24,7 +24,7 @@ from oslo.config import cfg
|
|||
from oslo.utils import excutils
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt import driver
|
||||
from nova.virt.hyperv import constants
|
||||
|
@ -217,8 +217,8 @@ class VolumeOps(object):
|
|||
if not self._initiator:
|
||||
self._initiator = self._volutils.get_iscsi_initiator()
|
||||
if not self._initiator:
|
||||
LOG.warn(_('Could not determine iscsi initiator name'),
|
||||
instance=instance)
|
||||
LOG.warning(_LW('Could not determine iscsi initiator name'),
|
||||
instance=instance)
|
||||
return {
|
||||
'ip': CONF.my_block_storage_ip,
|
||||
'host': CONF.host,
|
||||
|
|
|
@ -116,5 +116,5 @@ class IronicClientWrapper(object):
|
|||
if attempt == num_attempts:
|
||||
LOG.error(msg)
|
||||
raise exception.NovaException(msg)
|
||||
LOG.warning(msg)
|
||||
LOG.warn(msg)
|
||||
time.sleep(CONF.ironic.api_retry_interval)
|
||||
|
|
|
@ -202,7 +202,7 @@ class IronicDriver(virt_driver.ComputeDriver):
|
|||
except exception.InvalidArchitectureName:
|
||||
cpu_arch = None
|
||||
if not cpu_arch:
|
||||
LOG.warn(_LW("cpu_arch not defined for node '%s'"), node.uuid)
|
||||
LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)
|
||||
|
||||
nodes_extra_specs = {}
|
||||
|
||||
|
@ -230,8 +230,8 @@ class IronicDriver(virt_driver.ComputeDriver):
|
|||
if len(parts) == 2 and parts[0] and parts[1]:
|
||||
nodes_extra_specs[parts[0]] = parts[1]
|
||||
else:
|
||||
LOG.warn(_LW("Ignoring malformed capability '%s'. "
|
||||
"Format should be 'key:val'."), capability)
|
||||
LOG.warning(_LW("Ignoring malformed capability '%s'. "
|
||||
"Format should be 'key:val'."), capability)
|
||||
|
||||
vcpus_used = 0
|
||||
memory_mb_used = 0
|
||||
|
@ -528,17 +528,17 @@ class IronicDriver(virt_driver.ComputeDriver):
|
|||
|
||||
memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
|
||||
if memory_kib == 0:
|
||||
LOG.warn(_LW("Warning, memory usage is 0 for "
|
||||
"%(instance)s on baremetal node %(node)s."),
|
||||
{'instance': instance.uuid,
|
||||
'node': instance.node})
|
||||
LOG.warning(_LW("Warning, memory usage is 0 for "
|
||||
"%(instance)s on baremetal node %(node)s."),
|
||||
{'instance': instance.uuid,
|
||||
'node': instance.node})
|
||||
|
||||
num_cpu = node.properties.get('cpus', 0)
|
||||
if num_cpu == 0:
|
||||
LOG.warn(_LW("Warning, number of cpus is 0 for "
|
||||
"%(instance)s on baremetal node %(node)s."),
|
||||
{'instance': instance.uuid,
|
||||
'node': instance.node})
|
||||
LOG.warning(_LW("Warning, number of cpus is 0 for "
|
||||
"%(instance)s on baremetal node %(node)s."),
|
||||
{'instance': instance.uuid,
|
||||
'node': instance.node})
|
||||
|
||||
return hardware.InstanceInfo(state=map_power_state(node.power_state),
|
||||
max_mem_kb=memory_kib,
|
||||
|
|
|
@ -19,7 +19,7 @@ import time
|
|||
from oslo.config import cfg
|
||||
from oslo.serialization import jsonutils
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
||||
|
@ -62,7 +62,7 @@ def register_storage_use(storage_path, hostname):
|
|||
try:
|
||||
d = jsonutils.loads(f.read())
|
||||
except ValueError:
|
||||
LOG.warning(_("Cannot decode JSON from %(id_path)s"),
|
||||
LOG.warning(_LW("Cannot decode JSON from %(id_path)s"),
|
||||
{"id_path": id_path})
|
||||
|
||||
d[hostname] = time.time()
|
||||
|
@ -90,7 +90,7 @@ def get_storage_users(storage_path):
|
|||
try:
|
||||
d = jsonutils.loads(f.read())
|
||||
except ValueError:
|
||||
LOG.warning(_("Cannot decode JSON from %(id_path)s"),
|
||||
LOG.warning(_LW("Cannot decode JSON from %(id_path)s"),
|
||||
{"id_path": id_path})
|
||||
|
||||
recent_users = []
|
||||
|
|
|
@ -160,8 +160,8 @@ class VMwareVCDriver(driver.ComputeDriver):
|
|||
clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
|
||||
missing_clusters = set(self._cluster_names) - set(clusters_found)
|
||||
if missing_clusters:
|
||||
LOG.warn(_LW("The following clusters could not be found in the "
|
||||
"vCenter %s") % list(missing_clusters))
|
||||
LOG.warning(_LW("The following clusters could not be found in the "
|
||||
"vCenter %s"), list(missing_clusters))
|
||||
|
||||
# The _resources is used to maintain the vmops, volumeops and vcstate
|
||||
# objects per cluster
|
||||
|
@ -252,8 +252,8 @@ class VMwareVCDriver(driver.ComputeDriver):
|
|||
# anything if it is.
|
||||
instances = self.list_instances()
|
||||
if instance['uuid'] not in instances:
|
||||
LOG.warn(_LW('Instance cannot be found in host, or in an unknown'
|
||||
'state.'), instance=instance)
|
||||
LOG.warning(_LW('Instance cannot be found in host, or in an '
|
||||
'unknown state.'), instance=instance)
|
||||
else:
|
||||
state = vm_util.get_vm_state_from_name(self._session,
|
||||
instance['uuid'])
|
||||
|
|
|
@ -40,7 +40,7 @@ from oslo.config import cfg
|
|||
from oslo.utils import timeutils
|
||||
from oslo.vmware import exceptions as vexc
|
||||
|
||||
from nova.i18n import _, _LI
|
||||
from nova.i18n import _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt import imagecache
|
||||
from nova.virt.vmwareapi import ds_util
|
||||
|
@ -71,7 +71,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
|
|||
vexc.FileLockedException) as e:
|
||||
# There may be more than one process or thread that tries
|
||||
# to delete the file.
|
||||
LOG.warning(_("Unable to delete %(file)s. Exception: %(ex)s"),
|
||||
LOG.warning(_LW("Unable to delete %(file)s. Exception: %(ex)s"),
|
||||
{'file': ds_path, 'ex': e})
|
||||
except vexc.FileNotFoundException:
|
||||
LOG.debug("File not found: %s", ds_path)
|
||||
|
|
|
@ -19,7 +19,7 @@ from oslo.config import cfg
|
|||
from oslo.vmware import exceptions as vexc
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LW
|
||||
from nova.network import model
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt.vmwareapi import network_util
|
||||
|
@ -115,8 +115,8 @@ def _get_network_ref_from_opaque(opaque_networks, integration_bridge, bridge):
|
|||
'network-id': network['opaqueNetworkId'],
|
||||
'network-name': network['opaqueNetworkName'],
|
||||
'network-type': network['opaqueNetworkType']}
|
||||
LOG.warning(_("No valid network found in %(opaque)s, from %(bridge)s "
|
||||
"or %(integration_bridge)s"),
|
||||
LOG.warning(_LW("No valid network found in %(opaque)s, from %(bridge)s "
|
||||
"or %(integration_bridge)s"),
|
||||
{'opaque': opaque_networks, 'bridge': bridge,
|
||||
'integration_bridge': integration_bridge})
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ from oslo.config import cfg
|
|||
from oslo.vmware import vim_util as vutil
|
||||
import suds
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
vmware_opts = cfg.IntOpt('maximum_objects', default=100,
|
||||
|
@ -112,8 +112,8 @@ def get_dynamic_properties(vim, mobj, type, property_names):
|
|||
# The object may have information useful for logging
|
||||
if hasattr(obj_content.objects[0], 'missingSet'):
|
||||
for m in obj_content.objects[0].missingSet:
|
||||
LOG.warning(_("Unable to retrieve value for %(path)s "
|
||||
"Reason: %(reason)s"),
|
||||
LOG.warning(_LW("Unable to retrieve value for %(path)s "
|
||||
"Reason: %(reason)s"),
|
||||
{'path': m.path,
|
||||
'reason': m.fault.localizedMessage})
|
||||
return property_dict
|
||||
|
|
|
@ -1099,7 +1099,7 @@ def get_all_cluster_mors(session):
|
|||
return results.objects
|
||||
|
||||
except Exception as excep:
|
||||
LOG.warn(_("Failed to get cluster references %s") % excep)
|
||||
LOG.warning(_LW("Failed to get cluster references %s"), excep)
|
||||
|
||||
|
||||
def get_all_res_pool_mors(session):
|
||||
|
@ -1111,7 +1111,7 @@ def get_all_res_pool_mors(session):
|
|||
_cancel_retrieve_if_necessary(session, results)
|
||||
return results.objects
|
||||
except Exception as excep:
|
||||
LOG.warn(_("Failed to get resource pool references " "%s") % excep)
|
||||
LOG.warning(_LW("Failed to get resource pool references " "%s"), excep)
|
||||
|
||||
|
||||
def get_dynamic_property_mor(session, mor_ref, attribute):
|
||||
|
@ -1305,8 +1305,8 @@ def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
|
|||
the passed instance.
|
||||
"""
|
||||
if vm_ref is None:
|
||||
LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
|
||||
"with vm_ref=None"))
|
||||
LOG.warning(_LW("vmwareapi:vm_util:clone_vmref_for_instance, called "
|
||||
"with vm_ref=None"))
|
||||
raise vexc.MissingParameter(param="vm_ref")
|
||||
# Get the clone vm spec
|
||||
client_factory = session.vim.client.factory
|
||||
|
|
|
@ -373,7 +373,7 @@ class VMwareVMOps(object):
|
|||
# all other exceptions will be raised.
|
||||
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
|
||||
"can lead to unexpected results."),
|
||||
dst_folder_ds_path)
|
||||
dst_folder_ds_path)
|
||||
|
||||
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
|
||||
tmp_dir_loc = tmp_image_ds_loc.parent.parent
|
||||
|
@ -818,7 +818,7 @@ class VMwareVMOps(object):
|
|||
try:
|
||||
vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
|
||||
if vm_ref is None:
|
||||
LOG.warning(_('Instance does not exist on backend'),
|
||||
LOG.warning(_LW('Instance does not exist on backend'),
|
||||
instance=instance)
|
||||
return
|
||||
lst_properties = ["config.files.vmPathName", "runtime.powerState",
|
||||
|
@ -847,9 +847,9 @@ class VMwareVMOps(object):
|
|||
"UnregisterVM", vm_ref)
|
||||
LOG.debug("Unregistered the VM", instance=instance)
|
||||
except Exception as excep:
|
||||
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
|
||||
"exception while un-registering the VM: %s"),
|
||||
excep)
|
||||
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, got "
|
||||
"this exception while un-registering the VM: "
|
||||
"%s"), excep)
|
||||
# Delete the folder holding the VM related content on
|
||||
# the datastore.
|
||||
if destroy_disks and vm_ds_path:
|
||||
|
@ -870,9 +870,9 @@ class VMwareVMOps(object):
|
|||
{'datastore_name': vm_ds_path.datastore},
|
||||
instance=instance)
|
||||
except Exception:
|
||||
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
|
||||
"exception while deleting the VM contents from "
|
||||
"the disk"), exc_info=True)
|
||||
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, "
|
||||
"exception while deleting the VM contents "
|
||||
"from the disk"), exc_info=True)
|
||||
except Exception as exc:
|
||||
LOG.exception(exc, instance=instance)
|
||||
finally:
|
||||
|
@ -1117,8 +1117,8 @@ class VMwareVMOps(object):
|
|||
self._session._wait_for_task(destroy_task)
|
||||
LOG.debug("Destroyed the VM", instance=instance)
|
||||
except Exception as excep:
|
||||
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
|
||||
"exception while destroying the VM: %s"), excep)
|
||||
LOG.warning(_LW("In vmwareapi:vmops:confirm_migration, got this "
|
||||
"exception while destroying the VM: %s"), excep)
|
||||
|
||||
def finish_revert_migration(self, context, instance, network_info,
|
||||
block_device_info, power_on=True):
|
||||
|
@ -1581,8 +1581,8 @@ class VMwareVMOps(object):
|
|||
str(vi.cache_image_path),
|
||||
str(sized_disk_ds_loc))
|
||||
except Exception as e:
|
||||
LOG.warning(_("Root disk file creation "
|
||||
"failed - %s"), e)
|
||||
LOG.warning(_LW("Root disk file creation "
|
||||
"failed - %s"), e)
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_LE('Failed to copy cached '
|
||||
'image %(source)s to '
|
||||
|
|
|
@ -30,7 +30,7 @@ from nova.compute import utils as compute_utils
|
|||
from nova import context
|
||||
from nova import crypto
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
@ -191,8 +191,8 @@ class XenAPIBasedAgent(object):
|
|||
self.vm_ref = vm_ref
|
||||
|
||||
def _add_instance_fault(self, error, exc_info):
|
||||
LOG.warning(_("Ignoring error while configuring instance with "
|
||||
"agent: %s") % error,
|
||||
LOG.warning(_LW("Ignoring error while configuring instance with "
|
||||
"agent: %s"), error,
|
||||
instance=self.instance, exc_info=True)
|
||||
try:
|
||||
ctxt = context.get_admin_context()
|
||||
|
@ -267,8 +267,8 @@ class XenAPIBasedAgent(object):
|
|||
self._call_agent('agentupdate', args)
|
||||
except exception.AgentError as exc:
|
||||
# Silently fail for agent upgrades
|
||||
LOG.warning(_("Unable to update the agent due "
|
||||
"to: %(exc)s") % dict(exc=exc),
|
||||
LOG.warning(_LW("Unable to update the agent due "
|
||||
"to: %(exc)s"), dict(exc=exc),
|
||||
instance=self.instance)
|
||||
|
||||
def _exchange_key_with_agent(self):
|
||||
|
@ -419,9 +419,9 @@ def should_use_agent(instance):
|
|||
try:
|
||||
return strutils.bool_from_string(use_agent_raw, strict=True)
|
||||
except ValueError:
|
||||
LOG.warn(_("Invalid 'agent_present' value. "
|
||||
"Falling back to the default."),
|
||||
instance=instance)
|
||||
LOG.warning(_LW("Invalid 'agent_present' value. "
|
||||
"Falling back to the default."),
|
||||
instance=instance)
|
||||
return CONF.xenserver.use_agent_default
|
||||
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ from oslo.config import cfg
|
|||
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import versionutils
|
||||
|
@ -236,17 +236,18 @@ class XenAPISession(object):
|
|||
return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
|
||||
except self.XenAPI.Failure as exc:
|
||||
if self._is_retryable_exception(exc, fn):
|
||||
LOG.warn(_('%(plugin)s.%(fn)s failed. Retrying call.')
|
||||
% {'plugin': plugin, 'fn': fn})
|
||||
LOG.warning(_LW('%(plugin)s.%(fn)s failed. '
|
||||
'Retrying call.'),
|
||||
{'plugin': plugin, 'fn': fn})
|
||||
if retry_cb:
|
||||
retry_cb(exc=exc)
|
||||
else:
|
||||
raise
|
||||
except socket.error as exc:
|
||||
if exc.errno == errno.ECONNRESET:
|
||||
LOG.warn(_('Lost connection to XenAPI during call to '
|
||||
'%(plugin)s.%(fn)s. Retrying call.') %
|
||||
{'plugin': plugin, 'fn': fn})
|
||||
LOG.warning(_LW('Lost connection to XenAPI during call to '
|
||||
'%(plugin)s.%(fn)s. Retrying call.'),
|
||||
{'plugin': plugin, 'fn': fn})
|
||||
if retry_cb:
|
||||
retry_cb(exc=exc)
|
||||
else:
|
||||
|
|
|
@ -30,7 +30,7 @@ from oslo.serialization import jsonutils
|
|||
from oslo.utils import units
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt import driver
|
||||
|
@ -383,8 +383,8 @@ class XenAPIDriver(driver.ComputeDriver):
|
|||
self._initiator = stats['host_other-config']['iscsi_iqn']
|
||||
self._hypervisor_hostname = stats['host_hostname']
|
||||
except (TypeError, KeyError) as err:
|
||||
LOG.warn(_('Could not determine key: %s') % err,
|
||||
instance=instance)
|
||||
LOG.warning(_LW('Could not determine key: %s'), err,
|
||||
instance=instance)
|
||||
self._initiator = None
|
||||
return {
|
||||
'ip': self._get_block_storage_ip(),
|
||||
|
|
|
@ -29,7 +29,7 @@ from nova.compute import vm_mode
|
|||
from nova.compute import vm_states
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.pci import whitelist as pci_whitelist
|
||||
|
@ -291,8 +291,8 @@ def to_supported_instances(host_capabilities):
|
|||
|
||||
result.append((guestarch, hv_type.XEN, ostype))
|
||||
except ValueError:
|
||||
LOG.warning(
|
||||
_("Failed to extract instance support from %s"), capability)
|
||||
LOG.warning(_LW("Failed to extract instance support from %s"),
|
||||
capability)
|
||||
|
||||
return result
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ from nova.compute import power_state
|
|||
from nova.compute import task_states
|
||||
from nova.compute import vm_mode
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova.network import model as network_model
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import versionutils
|
||||
|
@ -323,8 +323,8 @@ def destroy_vm(session, instance, vm_ref):
|
|||
|
||||
def clean_shutdown_vm(session, instance, vm_ref):
|
||||
if is_vm_shutdown(session, vm_ref):
|
||||
LOG.warn(_("VM already halted, skipping shutdown..."),
|
||||
instance=instance)
|
||||
LOG.warning(_LW("VM already halted, skipping shutdown..."),
|
||||
instance=instance)
|
||||
return True
|
||||
|
||||
LOG.debug("Shutting down VM (cleanly)", instance=instance)
|
||||
|
@ -338,8 +338,8 @@ def clean_shutdown_vm(session, instance, vm_ref):
|
|||
|
||||
def hard_shutdown_vm(session, instance, vm_ref):
|
||||
if is_vm_shutdown(session, vm_ref):
|
||||
LOG.warn(_("VM already halted, skipping shutdown..."),
|
||||
instance=instance)
|
||||
LOG.warning(_LW("VM already halted, skipping shutdown..."),
|
||||
instance=instance)
|
||||
return True
|
||||
|
||||
LOG.debug("Shutting down VM (hard)", instance=instance)
|
||||
|
@ -842,7 +842,7 @@ def _find_cached_image(session, image_id, sr_ref):
|
|||
number_found = len(recs)
|
||||
if number_found > 0:
|
||||
if number_found > 1:
|
||||
LOG.warn(_("Multiple base images for image: %s") % image_id)
|
||||
LOG.warning(_LW("Multiple base images for image: %s"), image_id)
|
||||
return recs.keys()[0]
|
||||
|
||||
|
||||
|
@ -1232,9 +1232,9 @@ def _create_cached_image(context, session, instance, name_label,
|
|||
sr_type = session.call_xenapi('SR.get_type', sr_ref)
|
||||
|
||||
if CONF.use_cow_images and sr_type != "ext":
|
||||
LOG.warning(_("Fast cloning is only supported on default local SR "
|
||||
"of type ext. SR on this system was found to be of "
|
||||
"type %s. Ignoring the cow flag."), sr_type)
|
||||
LOG.warning(_LW("Fast cloning is only supported on default local SR "
|
||||
"of type ext. SR on this system was found to be of "
|
||||
"type %s. Ignoring the cow flag."), sr_type)
|
||||
|
||||
@utils.synchronized('xenapi-image-cache' + image_id)
|
||||
def _create_cached_image_impl(context, session, instance, name_label,
|
||||
|
@ -1307,8 +1307,8 @@ def create_image(context, session, instance, name_label, image_id,
|
|||
elif cache_images == 'none':
|
||||
cache = False
|
||||
else:
|
||||
LOG.warning(_("Unrecognized cache_images value '%s', defaulting to"
|
||||
" True"), CONF.xenserver.cache_images)
|
||||
LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to"
|
||||
" True"), CONF.xenserver.cache_images)
|
||||
cache = True
|
||||
|
||||
# Fetch (and cache) the image
|
||||
|
@ -1383,7 +1383,7 @@ def _image_uses_bittorrent(context, instance):
|
|||
elif torrent_images == 'none':
|
||||
pass
|
||||
else:
|
||||
LOG.warning(_("Invalid value '%s' for torrent_images"),
|
||||
LOG.warning(_LW("Invalid value '%s' for torrent_images"),
|
||||
torrent_images)
|
||||
|
||||
return bittorrent
|
||||
|
@ -1406,8 +1406,8 @@ def _choose_download_handler(context, instance):
|
|||
def get_compression_level():
|
||||
level = CONF.xenserver.image_compression_level
|
||||
if level is not None and (level < 1 or level > 9):
|
||||
LOG.warn(_("Invalid value '%d' for image_compression_level"),
|
||||
level)
|
||||
LOG.warning(_LW("Invalid value '%d' for image_compression_level"),
|
||||
level)
|
||||
return None
|
||||
return level
|
||||
|
||||
|
@ -1843,8 +1843,8 @@ def _scan_sr(session, sr_ref=None, max_attempts=4):
|
|||
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
|
||||
if attempt < max_attempts:
|
||||
ctxt.reraise = False
|
||||
LOG.warn(_("Retry SR scan due to error: %s")
|
||||
% exc)
|
||||
LOG.warning(_LW("Retry SR scan due to error: "
|
||||
"%s"), exc)
|
||||
greenthread.sleep(2 ** attempt)
|
||||
attempt += 1
|
||||
do_scan(sr_ref)
|
||||
|
@ -1876,8 +1876,8 @@ def _find_sr(session):
|
|||
filter_pattern = tokens[1]
|
||||
except IndexError:
|
||||
# oops, flag is invalid
|
||||
LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
|
||||
"formatting convention"),
|
||||
LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect "
|
||||
"formatting convention"),
|
||||
CONF.xenserver.sr_matching_filter)
|
||||
return None
|
||||
|
||||
|
@ -2583,14 +2583,14 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
|
|||
"""
|
||||
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
|
||||
if not boot_menu_url:
|
||||
LOG.warn(_('ipxe_boot_menu_url not set, user will have to'
|
||||
' enter URL manually...'), instance=instance)
|
||||
LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to'
|
||||
' enter URL manually...'), instance=instance)
|
||||
return
|
||||
|
||||
network_name = CONF.xenserver.ipxe_network_name
|
||||
if not network_name:
|
||||
LOG.warn(_('ipxe_network_name not set, user will have to'
|
||||
' enter IP manually...'), instance=instance)
|
||||
LOG.warning(_LW('ipxe_network_name not set, user will have to'
|
||||
' enter IP manually...'), instance=instance)
|
||||
return
|
||||
|
||||
network = None
|
||||
|
@ -2600,9 +2600,9 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
|
|||
break
|
||||
|
||||
if not network:
|
||||
LOG.warn(_("Unable to find network matching '%(network_name)s', user"
|
||||
" will have to enter IP manually...") %
|
||||
{'network_name': network_name}, instance=instance)
|
||||
LOG.warning(_LW("Unable to find network matching '%(network_name)s', "
|
||||
"user will have to enter IP manually..."),
|
||||
{'network_name': network_name}, instance=instance)
|
||||
return
|
||||
|
||||
sr_path = get_sr_path(session)
|
||||
|
@ -2624,8 +2624,8 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
|
|||
except session.XenAPI.Failure as exc:
|
||||
_type, _method, error = exc.details[:3]
|
||||
if error == 'CommandNotFound':
|
||||
LOG.warn(_("ISO creation tool '%s' does not exist.") %
|
||||
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
|
||||
LOG.warning(_LW("ISO creation tool '%s' does not exist."),
|
||||
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ from nova.compute import vm_states
|
|||
from nova.console import type as ctype
|
||||
from nova import context as nova_context
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.pci import manager as pci_manager
|
||||
|
@ -423,8 +423,8 @@ class VMOps(object):
|
|||
vm_utils.handle_ipxe_iso(
|
||||
self._session, instance, vdis['iso'], network_info)
|
||||
else:
|
||||
LOG.warning(_('ipxe_boot is True but no ISO image found'),
|
||||
instance=instance)
|
||||
LOG.warning(_LW('ipxe_boot is True but no ISO image '
|
||||
'found'), instance=instance)
|
||||
|
||||
if resize:
|
||||
self._resize_up_vdis(instance, vdis)
|
||||
|
@ -1082,9 +1082,9 @@ class VMOps(object):
|
|||
self._restore_orig_vm_and_cleanup_orphan(instance)
|
||||
# TODO(johngarbutt) should also cleanup VHDs at destination
|
||||
except Exception as rollback_error:
|
||||
LOG.warn(_("_migrate_disk_resizing_up failed to "
|
||||
"rollback: %s"), rollback_error,
|
||||
instance=instance)
|
||||
LOG.warning(_LW("_migrate_disk_resizing_up failed to "
|
||||
"rollback: %s"), rollback_error,
|
||||
instance=instance)
|
||||
raise exception.InstanceFaultRollback(error)
|
||||
|
||||
def _apply_orig_vm_name_label(self, instance, vm_ref):
|
||||
|
@ -1216,9 +1216,9 @@ class VMOps(object):
|
|||
bad_volumes_callback=bad_volumes_callback)
|
||||
return
|
||||
elif details[0] == 'SR_BACKEND_FAILURE_46':
|
||||
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
|
||||
" volumes and starting halted instance"),
|
||||
instance=instance)
|
||||
LOG.warning(_LW("Reboot failed due to bad volumes, detaching "
|
||||
"bad volumes and starting halted instance"),
|
||||
instance=instance)
|
||||
self._start(instance, vm_ref=vm_ref,
|
||||
bad_volumes_callback=bad_volumes_callback)
|
||||
return
|
||||
|
@ -1294,8 +1294,8 @@ class VMOps(object):
|
|||
# Skip the update when not possible, as the updated metadata will
|
||||
# get added when the VM is being booted up at the end of the
|
||||
# resize or rebuild.
|
||||
LOG.warn(_("Unable to update metadata, VM not found."),
|
||||
instance=instance, exc_info=True)
|
||||
LOG.warning(_LW("Unable to update metadata, VM not found."),
|
||||
instance=instance, exc_info=True)
|
||||
return
|
||||
|
||||
def process_change(location, change):
|
||||
|
@ -1446,7 +1446,7 @@ class VMOps(object):
|
|||
|
||||
"""
|
||||
if vm_ref is None:
|
||||
LOG.warning(_("VM is not present, skipping destroy..."),
|
||||
LOG.warning(_LW("VM is not present, skipping destroy..."),
|
||||
instance=instance)
|
||||
# NOTE(alaski): There should not be a block device mapping here,
|
||||
# but if there is it very likely means there was an error cleaning
|
||||
|
@ -1567,7 +1567,7 @@ class VMOps(object):
|
|||
try:
|
||||
vm_ref = self._get_vm_opaque_ref(instance)
|
||||
except exception.NotFound:
|
||||
LOG.warning(_("VM is not present, skipping soft delete..."),
|
||||
LOG.warning(_LW("VM is not present, skipping soft delete..."),
|
||||
instance=instance)
|
||||
else:
|
||||
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
|
||||
|
|
|
@ -25,7 +25,7 @@ from eventlet import greenthread
|
|||
from oslo.config import cfg
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
xenapi_volume_utils_opts = [
|
||||
|
@ -226,7 +226,7 @@ def purge_sr(session, sr_ref):
|
|||
for vdi_ref in vdi_refs:
|
||||
vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
|
||||
if vbd_refs:
|
||||
LOG.warn(_('Cannot purge SR with referenced VDIs'))
|
||||
LOG.warning(_LW('Cannot purge SR with referenced VDIs'))
|
||||
return
|
||||
|
||||
forget_sr(session, sr_ref)
|
||||
|
@ -243,16 +243,16 @@ def _unplug_pbds(session, sr_ref):
|
|||
try:
|
||||
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
|
||||
except session.XenAPI.Failure as exc:
|
||||
LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
|
||||
' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref})
|
||||
LOG.warning(_LW('Ignoring exception %(exc)s when getting PBDs'
|
||||
' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref})
|
||||
return
|
||||
|
||||
for pbd in pbds:
|
||||
try:
|
||||
session.call_xenapi("PBD.unplug", pbd)
|
||||
except session.XenAPI.Failure as exc:
|
||||
LOG.warn(_('Ignoring exception %(exc)s when unplugging'
|
||||
' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd})
|
||||
LOG.warning(_LW('Ignoring exception %(exc)s when unplugging'
|
||||
' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd})
|
||||
|
||||
|
||||
def get_device_number(mountpoint):
|
||||
|
@ -275,7 +275,7 @@ def _mountpoint_to_number(mountpoint):
|
|||
elif re.match('^[0-9]+$', mountpoint):
|
||||
return string.atoi(mountpoint, 10)
|
||||
else:
|
||||
LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
|
||||
LOG.warning(_LW('Mountpoint cannot be translated: %s'), mountpoint)
|
||||
return -1
|
||||
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ Management class for Storage-related functions (attach, detach, etc).
|
|||
from oslo.utils import excutils
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LI
|
||||
from nova.i18n import _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt.xenapi import vm_utils
|
||||
from nova.virt.xenapi import volume_utils
|
||||
|
@ -142,8 +142,8 @@ class VolumeOps(object):
|
|||
if vbd_ref is None:
|
||||
# NOTE(sirp): If we don't find the VBD then it must have been
|
||||
# detached previously.
|
||||
LOG.warn(_('Skipping detach because VBD for %s was not found'),
|
||||
instance_name)
|
||||
LOG.warning(_LW('Skipping detach because VBD for %s was '
|
||||
'not found'), instance_name)
|
||||
else:
|
||||
self._detach_vbds_and_srs(vm_ref, [vbd_ref])
|
||||
LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
|
||||
|
|
Loading…
Reference in New Issue