Replace `_` with `_LI` in all LOG.info - part 1
oslo.i18n uses different marker functions to separate the translatable messages into different catalogs, which the translation teams can prioritize translating. For details, please refer to: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#guidelines-for-use-in-openstack Marker functions were missing in some places in the network directory. This commit makes these changes: * Add missing marker functions * Use ',' instead of '%' when adding variables to log messages Also added a hacking rule that checks LOG.info messages for the `_LI` translation marker. Change-Id: I96766d723b01082339876ed94bbaa77783322b8c
This commit is contained in:
parent
b32ccb7b41
commit
8431670ef8
|
@ -40,6 +40,7 @@ Nova Specific Commandments
|
|||
- [N325] str() and unicode() cannot be used on an exception. Remove use or use six.text_type()
|
||||
- [N326] Translated messages cannot be concatenated. String should be included in translated message.
|
||||
- [N327] assert_called_once() is not a valid method
|
||||
- [N328] Validate that LOG.info messages use _LI.
|
||||
|
||||
Creating Unit Tests
|
||||
-------------------
|
||||
|
|
|
@ -56,7 +56,9 @@ asse_equal_start_with_none_re = re.compile(
|
|||
r"assertEqual\(None,")
|
||||
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
|
||||
log_translation = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
|
||||
r"(.)*LOG\.(audit|error|warn|warning|critical|exception)\(\s*('|\")")
|
||||
log_translation_info = re.compile(
|
||||
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
|
||||
translated_log = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
|
||||
"\(\s*_\(\s*('|\")")
|
||||
|
@ -294,10 +296,22 @@ def validate_log_translations(logical_line, physical_line, filename):
|
|||
# Translations are not required in the test directory
|
||||
# and the Xen utilities
|
||||
if ("nova/tests" in filename or
|
||||
"plugins/xenserver/xenapi/etc/xapi.d" in filename):
|
||||
"plugins/xenserver/xenapi/etc/xapi.d" in filename or
|
||||
# TODO(Mike_D):Needs to be remove with:
|
||||
# Iaebb239ef20a0da3df1e3552baf26f412d0fcdc0
|
||||
"nova/compute" in filename or
|
||||
"nova/cells" in filename or
|
||||
"nova/image" in filename or
|
||||
"nova/conductor" in filename or
|
||||
"nova/wsgi.py" in filename or
|
||||
"nova/filters.py" in filename or
|
||||
"nova/db" in filename):
|
||||
return
|
||||
if pep8.noqa(physical_line):
|
||||
return
|
||||
msg = "N328: LOG.info messages require translations `_LI()`!"
|
||||
if log_translation_info.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "N321: Log messages require translations!"
|
||||
if log_translation.match(logical_line):
|
||||
yield (0, msg)
|
||||
|
|
|
@ -22,7 +22,7 @@ from oslo.config import cfg
|
|||
|
||||
from nova.compute import flavors
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.network import base_api
|
||||
from nova.network import floating_ips
|
||||
from nova.network import model as network_model
|
||||
|
@ -224,8 +224,8 @@ class API(base_api.NetworkAPI):
|
|||
if orig_instance_uuid:
|
||||
msg_dict = dict(address=floating_address,
|
||||
instance_id=orig_instance_uuid)
|
||||
LOG.info(_('re-assign floating IP %(address)s from '
|
||||
'instance %(instance_id)s') % msg_dict)
|
||||
LOG.info(_LI('re-assign floating IP %(address)s from '
|
||||
'instance %(instance_id)s'), msg_dict)
|
||||
orig_instance = objects.Instance.get_by_uuid(context,
|
||||
orig_instance_uuid)
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ import sys
|
|||
from oslo.config import cfg
|
||||
from oslo.utils import importutils
|
||||
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _LE, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
driver_opts = [
|
||||
|
@ -39,6 +39,6 @@ def load_network_driver(network_driver=None):
|
|||
LOG.error(_LE("Network driver option required, but not specified"))
|
||||
sys.exit(1)
|
||||
|
||||
LOG.info(_("Loading network driver '%s'") % network_driver)
|
||||
LOG.info(_LI("Loading network driver '%s'"), network_driver)
|
||||
|
||||
return importutils.import_module(network_driver)
|
||||
|
|
|
@ -25,7 +25,7 @@ import six
|
|||
from nova import context
|
||||
from nova.db import base
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.network import rpcapi as network_rpcapi
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
|
@ -173,7 +173,7 @@ class FloatingIP(object):
|
|||
address,
|
||||
affect_auto_assigned=True)
|
||||
except exception.FloatingIpNotAssociated:
|
||||
LOG.info(_("Floating IP %s is not associated. Ignore."),
|
||||
LOG.info(_LI("Floating IP %s is not associated. Ignore."),
|
||||
address)
|
||||
# deallocate if auto_assigned
|
||||
if floating_ip.auto_assigned:
|
||||
|
@ -532,7 +532,7 @@ class FloatingIP(object):
|
|||
if not floating_addresses or (source and source == dest):
|
||||
return
|
||||
|
||||
LOG.info(_("Starting migration network for instance %s"),
|
||||
LOG.info(_LI("Starting migration network for instance %s"),
|
||||
instance_uuid)
|
||||
for address in floating_addresses:
|
||||
floating_ip = objects.FloatingIP.get_by_address(context, address)
|
||||
|
@ -567,7 +567,7 @@ class FloatingIP(object):
|
|||
if not floating_addresses or (source and source == dest):
|
||||
return
|
||||
|
||||
LOG.info(_("Finishing migration network for instance %s"),
|
||||
LOG.info(_LI("Finishing migration network for instance %s"),
|
||||
instance_uuid)
|
||||
|
||||
for address in floating_addresses:
|
||||
|
|
|
@ -19,7 +19,7 @@ import tempfile
|
|||
from oslo.config import cfg
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LI
|
||||
from nova.network import dns_driver
|
||||
from nova.openstack.common import log as logging
|
||||
|
||||
|
@ -198,7 +198,7 @@ class MiniDNS(dns_driver.DNSDriver):
|
|||
entry['domain'] != fqdomain.lower()):
|
||||
outfile.write(line)
|
||||
else:
|
||||
LOG.info(_("deleted %s"), entry)
|
||||
LOG.info(_LI("deleted %s"), entry)
|
||||
deleted = True
|
||||
infile.close()
|
||||
outfile.close()
|
||||
|
|
|
@ -28,7 +28,7 @@ from nova.api.openstack import extensions
|
|||
from nova.compute import flavors
|
||||
from nova.compute import utils as compute_utils
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova.network import base_api
|
||||
from nova.network import model as network_model
|
||||
from nova.network import neutronv2
|
||||
|
@ -530,7 +530,7 @@ class API(base_api.NetworkAPI):
|
|||
neutronv2.get_client(context).update_port(port,
|
||||
port_req_body)
|
||||
except Exception:
|
||||
LOG.info(_('Unable to reset device ID for port %s'), port,
|
||||
LOG.info(_LI('Unable to reset device ID for port %s'), port,
|
||||
instance=instance)
|
||||
|
||||
self._delete_ports(neutron, instance, ports, raise_if_fail=True)
|
||||
|
@ -946,8 +946,8 @@ class API(base_api.NetworkAPI):
|
|||
|
||||
msg_dict = dict(address=floating_address,
|
||||
instance_id=orig_instance_uuid)
|
||||
LOG.info(_('re-assign floating IP %(address)s from '
|
||||
'instance %(instance_id)s') % msg_dict)
|
||||
LOG.info(_LI('re-assign floating IP %(address)s from '
|
||||
'instance %(instance_id)s'), msg_dict)
|
||||
orig_instance = objects.Instance.get_by_uuid(context,
|
||||
orig_instance_uuid)
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ from webob import exc
|
|||
|
||||
from nova.compute import api as compute_api
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.network import neutronv2
|
||||
from nova.network.security_group import security_group_base
|
||||
from nova import objects
|
||||
|
@ -437,8 +437,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
port['security_groups'].append(security_group_id)
|
||||
updated_port = {'security_groups': port['security_groups']}
|
||||
try:
|
||||
LOG.info(_("Adding security group %(security_group_id)s to "
|
||||
"port %(port_id)s"),
|
||||
LOG.info(_LI("Adding security group %(security_group_id)s to "
|
||||
"port %(port_id)s"),
|
||||
{'security_group_id': security_group_id,
|
||||
'port_id': port['id']})
|
||||
neutron.update_port(port['id'], {'port': updated_port})
|
||||
|
@ -492,8 +492,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
|
||||
updated_port = {'security_groups': port['security_groups']}
|
||||
try:
|
||||
LOG.info(_("Adding security group %(security_group_id)s to "
|
||||
"port %(port_id)s"),
|
||||
LOG.info(_LI("Adding security group %(security_group_id)s to "
|
||||
"port %(port_id)s"),
|
||||
{'security_group_id': security_group_id,
|
||||
'port_id': port['id']})
|
||||
neutron.update_port(port['id'], {'port': updated_port})
|
||||
|
|
|
@ -25,7 +25,7 @@ from oslo.config import cfg
|
|||
|
||||
from nova.compute import rpcapi as compute_rpcapi
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LW
|
||||
from nova.i18n import _, _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import rpc
|
||||
from nova.scheduler import driver
|
||||
|
@ -75,10 +75,10 @@ class FilterScheduler(driver.Scheduler):
|
|||
self.notifier.info(context, 'scheduler.run_instance.start', payload)
|
||||
|
||||
instance_uuids = request_spec.get('instance_uuids')
|
||||
LOG.info(_("Attempting to build %(num_instances)d instance(s) "
|
||||
"uuids: %(instance_uuids)s"),
|
||||
{'num_instances': len(instance_uuids),
|
||||
'instance_uuids': instance_uuids})
|
||||
LOG.info(_LI("Attempting to build %(num_instances)d instance(s) "
|
||||
"uuids: %(instance_uuids)s"),
|
||||
{'num_instances': len(instance_uuids),
|
||||
'instance_uuids': instance_uuids})
|
||||
LOG.debug("Request Spec: %s" % request_spec)
|
||||
|
||||
# check retry policy. Rather ugly use of instance_uuids[0]...
|
||||
|
@ -104,10 +104,10 @@ class FilterScheduler(driver.Scheduler):
|
|||
try:
|
||||
try:
|
||||
weighed_host = weighed_hosts.pop(0)
|
||||
LOG.info(_("Choosing host %(weighed_host)s "
|
||||
"for instance %(instance_uuid)s"),
|
||||
{'weighed_host': weighed_host,
|
||||
'instance_uuid': instance_uuid})
|
||||
LOG.info(_LI("Choosing host %(weighed_host)s "
|
||||
"for instance %(instance_uuid)s"),
|
||||
{'weighed_host': weighed_host,
|
||||
'instance_uuid': instance_uuid})
|
||||
except IndexError:
|
||||
raise exception.NoValidHost(reason="")
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ from nova.compute import task_states
|
|||
from nova.compute import vm_states
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LW
|
||||
from nova.i18n import _, _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.pci import stats as pci_stats
|
||||
from nova.scheduler import filters
|
||||
|
@ -419,8 +419,8 @@ class HostManager(object):
|
|||
dead_nodes = set(self.host_state_map.keys()) - seen_nodes
|
||||
for state_key in dead_nodes:
|
||||
host, node = state_key
|
||||
LOG.info(_("Removing dead compute node %(host)s:%(node)s "
|
||||
"from scheduler") % {'host': host, 'node': node})
|
||||
LOG.info(_LI("Removing dead compute node %(host)s:%(node)s "
|
||||
"from scheduler"), {'host': host, 'node': node})
|
||||
del self.host_state_map[state_key]
|
||||
|
||||
return self.host_state_map.itervalues()
|
||||
|
|
|
@ -179,18 +179,16 @@ class HackingTestCase(test.NoDBTestCase):
|
|||
'exception']
|
||||
levels = ['_LI', '_LW', '_LE', '_LC']
|
||||
debug = "LOG.debug('OK')"
|
||||
self.assertEqual(0,
|
||||
len(list(
|
||||
checks.validate_log_translations(debug, debug, 'f'))))
|
||||
audit = "LOG.audit(_('OK'))"
|
||||
self.assertEqual(
|
||||
0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
|
||||
self.assertEqual(
|
||||
0, len(list(checks.validate_log_translations(audit, audit, 'f'))))
|
||||
for log in logs:
|
||||
bad = 'LOG.%s("Bad")' % log
|
||||
self.assertEqual(1,
|
||||
len(list(
|
||||
checks.validate_log_translations(bad, bad, 'f'))))
|
||||
ok = "LOG.%s(_('OK'))" % log
|
||||
self.assertEqual(0,
|
||||
len(list(
|
||||
checks.validate_log_translations(ok, ok, 'f'))))
|
||||
ok = "LOG.%s('OK') # noqa" % log
|
||||
self.assertEqual(0,
|
||||
len(list(
|
||||
|
|
|
@ -18,7 +18,7 @@ import time
|
|||
|
||||
from oslo.utils import importutils
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
||||
|
@ -118,7 +118,7 @@ class Mount(object):
|
|||
start_time = time.time()
|
||||
device = self._inner_get_dev()
|
||||
while not device:
|
||||
LOG.info(_('Device allocation failed. Will retry in 2 seconds.'))
|
||||
LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
|
||||
time.sleep(2)
|
||||
if time.time() - start_time > MAX_DEVICE_WAIT:
|
||||
LOG.warn(_('Device allocation failed after repeated retries.'))
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
# under the License.
|
||||
"""Support for mounting images with the loop device."""
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt.disk.mount import api
|
||||
|
@ -30,7 +30,7 @@ class LoopMount(api.Mount):
|
|||
run_as_root=True)
|
||||
if err:
|
||||
self.error = _('Could not attach image to loopback: %s') % err
|
||||
LOG.info(_('Loop mount error: %s'), self.error)
|
||||
LOG.info(_LI('Loop mount error: %s'), self.error)
|
||||
self.linked = False
|
||||
self.device = None
|
||||
return False
|
||||
|
|
|
@ -20,7 +20,7 @@ import time
|
|||
|
||||
from oslo.config import cfg
|
||||
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt.disk.mount import api
|
||||
|
@ -88,7 +88,7 @@ class NbdMount(api.Mount):
|
|||
run_as_root=True)
|
||||
if err:
|
||||
self.error = _('qemu-nbd error: %s') % err
|
||||
LOG.info(_('NBD mount error: %s'), self.error)
|
||||
LOG.info(_LI('NBD mount error: %s'), self.error)
|
||||
return False
|
||||
|
||||
# NOTE(vish): this forks into another process, so give it a chance
|
||||
|
@ -101,7 +101,7 @@ class NbdMount(api.Mount):
|
|||
time.sleep(1)
|
||||
else:
|
||||
self.error = _('nbd device %s did not show up') % device
|
||||
LOG.info(_('NBD mount error: %s'), self.error)
|
||||
LOG.info(_LI('NBD mount error: %s'), self.error)
|
||||
|
||||
# Cleanup
|
||||
_out, err = utils.trycmd('qemu-nbd', '-d', device,
|
||||
|
|
|
@ -25,7 +25,7 @@ import sys
|
|||
from oslo.config import cfg
|
||||
from oslo.utils import importutils
|
||||
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
from nova.virt import event as virtevent
|
||||
|
@ -1374,7 +1374,7 @@ def load_compute_driver(virtapi, compute_driver=None):
|
|||
LOG.error(_LE("Compute driver option required, but not specified"))
|
||||
sys.exit(1)
|
||||
|
||||
LOG.info(_("Loading compute driver '%s'") % compute_driver)
|
||||
LOG.info(_LI("Loading compute driver '%s'"), compute_driver)
|
||||
try:
|
||||
driver = importutils.import_object_ns('nova.virt',
|
||||
compute_driver,
|
||||
|
|
|
@ -20,7 +20,6 @@ from oslo.utils import importutils
|
|||
|
||||
from nova.compute import utils as compute_utils
|
||||
from nova import context
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.network import linux_net
|
||||
from nova import objects
|
||||
|
@ -171,8 +170,8 @@ class IptablesFirewallDriver(FirewallDriver):
|
|||
self.remove_filters_for_instance(instance)
|
||||
self.iptables.apply()
|
||||
else:
|
||||
LOG.info(_('Attempted to unfilter instance which is not '
|
||||
'filtered'), instance=instance)
|
||||
LOG.info(_LI('Attempted to unfilter instance which is not '
|
||||
'filtered'), instance=instance)
|
||||
|
||||
def prepare_instance_filter(self, instance, network_info):
|
||||
self.instance_info[instance['id']] = (instance, network_info)
|
||||
|
|
|
@ -28,7 +28,7 @@ if sys.platform == 'win32':
|
|||
import wmi
|
||||
|
||||
from nova import block_device
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt import driver
|
||||
|
||||
|
@ -68,8 +68,8 @@ class BaseVolumeUtils(object):
|
|||
initiator_name = str(temp[0])
|
||||
_winreg.CloseKey(key)
|
||||
except Exception:
|
||||
LOG.info(_("The ISCSI initiator name can't be found. "
|
||||
"Choosing the default one"))
|
||||
LOG.info(_LI("The ISCSI initiator name can't be found. "
|
||||
"Choosing the default one"))
|
||||
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
|
||||
if computer_system.PartofDomain:
|
||||
initiator_name += '.' + computer_system.Domain.lower()
|
||||
|
|
|
@ -246,7 +246,7 @@ class VMOps(object):
|
|||
def spawn(self, context, instance, image_meta, injected_files,
|
||||
admin_password, network_info, block_device_info=None):
|
||||
"""Create a new VM and start it."""
|
||||
LOG.info(_("Spawning new instance"), instance=instance)
|
||||
LOG.info(_LI("Spawning new instance"), instance=instance)
|
||||
|
||||
instance_name = instance['name']
|
||||
if self._vmutils.vm_exists(instance_name):
|
||||
|
@ -328,7 +328,7 @@ class VMOps(object):
|
|||
_('Invalid config_drive_format "%s"') %
|
||||
CONF.config_drive_format)
|
||||
|
||||
LOG.info(_('Using config drive for instance'), instance=instance)
|
||||
LOG.info(_LI('Using config drive for instance'), instance=instance)
|
||||
|
||||
extra_md = {}
|
||||
if admin_password and CONF.hyperv.config_drive_inject_password:
|
||||
|
@ -341,7 +341,7 @@ class VMOps(object):
|
|||
instance_path = self._pathutils.get_instance_dir(
|
||||
instance['name'])
|
||||
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
|
||||
LOG.info(_('Creating config drive at %(path)s'),
|
||||
LOG.info(_LI('Creating config drive at %(path)s'),
|
||||
{'path': configdrive_path_iso}, instance=instance)
|
||||
|
||||
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
|
||||
|
@ -389,7 +389,7 @@ class VMOps(object):
|
|||
def destroy(self, instance, network_info=None, block_device_info=None,
|
||||
destroy_disks=True):
|
||||
instance_name = instance['name']
|
||||
LOG.info(_("Got request to destroy instance"), instance=instance)
|
||||
LOG.info(_LI("Got request to destroy instance"), instance=instance)
|
||||
try:
|
||||
if self._vmutils.vm_exists(instance_name):
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ from oslo.vmware import vim_util
|
|||
import suds
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LW
|
||||
from nova.i18n import _, _LI, _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import uuidutils
|
||||
from nova.virt import driver
|
||||
|
@ -455,8 +455,8 @@ class VMwareVCDriver(driver.ComputeDriver):
|
|||
stats_dict = self._get_available_resources(host_stats)
|
||||
|
||||
else:
|
||||
LOG.info(_("Invalid cluster or resource pool"
|
||||
" name : %s") % nodename)
|
||||
LOG.info(_LI("Invalid cluster or resource pool"
|
||||
" name : %s"), nodename)
|
||||
|
||||
return stats_dict
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ from oslo.config import cfg
|
|||
from oslo.utils import timeutils
|
||||
from oslo.vmware import exceptions as vexc
|
||||
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt import imagecache
|
||||
from nova.virt.vmwareapi import ds_util
|
||||
|
@ -159,13 +159,13 @@ class ImageCacheManager(imagecache.ImageCacheManager):
|
|||
ds_util.mkdir(self._session, ts_path, dc_info.ref)
|
||||
except vexc.FileAlreadyExistsException:
|
||||
LOG.debug("Timestamp already exists.")
|
||||
LOG.info(_("Image %s is no longer used by this node. "
|
||||
"Pending deletion!"), image)
|
||||
LOG.info(_LI("Image %s is no longer used by this node. "
|
||||
"Pending deletion!"), image)
|
||||
else:
|
||||
dt = self._get_datetime_from_filename(str(ts))
|
||||
if timeutils.is_older_than(dt, age_seconds):
|
||||
LOG.info(_("Image %s is no longer used. "
|
||||
"Deleting!"), path)
|
||||
LOG.info(_LI("Image %s is no longer used. "
|
||||
"Deleting!"), path)
|
||||
# Image has aged - delete the image ID folder
|
||||
self._folder_delete(path, dc_info.ref)
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ from nova.compute import vm_states
|
|||
from nova.console import type as ctype
|
||||
from nova import context as nova_context
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE, _LW
|
||||
from nova.i18n import _, _LE, _LI, _LW
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.openstack.common import uuidutils
|
||||
|
@ -506,7 +506,7 @@ class VMwareVMOps(object):
|
|||
CONF.config_drive_format)
|
||||
raise exception.InstancePowerOnFailure(reason=reason)
|
||||
|
||||
LOG.info(_('Using config drive for instance'), instance=instance)
|
||||
LOG.info(_LI('Using config drive for instance'), instance=instance)
|
||||
extra_md = {}
|
||||
if admin_password:
|
||||
extra_md['admin_pass'] = admin_password
|
||||
|
@ -1133,11 +1133,11 @@ class VMwareVMOps(object):
|
|||
timeout=timeout)
|
||||
|
||||
if instances_info["instance_count"] > 0:
|
||||
LOG.info(_("Found %(instance_count)d hung reboots "
|
||||
"older than %(timeout)d seconds") % instances_info)
|
||||
LOG.info(_LI("Found %(instance_count)d hung reboots "
|
||||
"older than %(timeout)d seconds"), instances_info)
|
||||
|
||||
for instance in instances:
|
||||
LOG.info(_("Automatically hard rebooting"), instance=instance)
|
||||
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
|
||||
self.compute_api.reboot(ctxt, instance, "HARD")
|
||||
|
||||
def get_info(self, instance):
|
||||
|
|
|
@ -21,7 +21,7 @@ from oslo.config import cfg
|
|||
from oslo.vmware import vim_util as vutil
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt.vmwareapi import vim_util
|
||||
from nova.virt.vmwareapi import vm_util
|
||||
|
@ -443,8 +443,8 @@ class VMwareVolumeOps(object):
|
|||
|
||||
# The volume has been moved from its original location.
|
||||
# Need to consolidate the VMDK files.
|
||||
LOG.info(_("The volume's backing has been relocated to %s. Need to "
|
||||
"consolidate backing disk file."), current_device_path)
|
||||
LOG.info(_LI("The volume's backing has been relocated to %s. Need to "
|
||||
"consolidate backing disk file."), current_device_path)
|
||||
|
||||
# Pick the resource pool on which the instance resides.
|
||||
# Move the volume to the datastore where the new VMDK file is present.
|
||||
|
|
|
@ -30,7 +30,7 @@ from nova.compute import utils as compute_utils
|
|||
from nova import context
|
||||
from nova import crypto
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
@ -393,20 +393,20 @@ def find_guest_agent(base_dir):
|
|||
# reconfigure the network from xenstore data,
|
||||
# so manipulation of files in /etc is not
|
||||
# required
|
||||
LOG.info(_('XenServer tools installed in this '
|
||||
'image are capable of network injection. '
|
||||
'Networking files will not be'
|
||||
'manipulated'))
|
||||
LOG.info(_LI('XenServer tools installed in this '
|
||||
'image are capable of network injection. '
|
||||
'Networking files will not be'
|
||||
'manipulated'))
|
||||
return True
|
||||
xe_daemon_filename = os.path.join(base_dir,
|
||||
'usr', 'sbin', 'xe-daemon')
|
||||
if os.path.isfile(xe_daemon_filename):
|
||||
LOG.info(_('XenServer tools are present '
|
||||
'in this image but are not capable '
|
||||
'of network injection'))
|
||||
LOG.info(_LI('XenServer tools are present '
|
||||
'in this image but are not capable '
|
||||
'of network injection'))
|
||||
else:
|
||||
LOG.info(_('XenServer tools are not '
|
||||
'installed in this image'))
|
||||
LOG.info(_LI('XenServer tools are not '
|
||||
'installed in this image'))
|
||||
return False
|
||||
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ from nova.compute import vm_mode
|
|||
from nova.compute import vm_states
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.pci import whitelist as pci_whitelist
|
||||
|
@ -73,10 +73,11 @@ class Host(object):
|
|||
name = vm_rec['name_label']
|
||||
uuid = _uuid_find(ctxt, host, name)
|
||||
if not uuid:
|
||||
LOG.info(_('Instance %(name)s running on %(host)s'
|
||||
' could not be found in the database:'
|
||||
' assuming it is a worker VM and skip'
|
||||
' ping migration to a new host'),
|
||||
LOG.info(_LI('Instance %(name)s running on '
|
||||
'%(host)s could not be found in '
|
||||
'the database: assuming it is a '
|
||||
'worker VM and skip ping migration '
|
||||
'to a new host'),
|
||||
{'name': name, 'host': host})
|
||||
continue
|
||||
instance = objects.Instance.get_by_uuid(ctxt, uuid)
|
||||
|
|
|
@ -390,11 +390,11 @@ def unplug_vbd(session, vbd_ref, this_vm_ref):
|
|||
except session.XenAPI.Failure as exc:
|
||||
err = len(exc.details) > 0 and exc.details[0]
|
||||
if err == 'DEVICE_ALREADY_DETACHED':
|
||||
LOG.info(_('VBD %s already detached'), vbd_ref)
|
||||
LOG.info(_LI('VBD %s already detached'), vbd_ref)
|
||||
return
|
||||
elif _should_retry_unplug_vbd(err):
|
||||
LOG.info(_('VBD %(vbd_ref)s uplug failed with "%(err)s", '
|
||||
'attempt %(num_attempt)d/%(max_attempts)d'),
|
||||
LOG.info(_LI('VBD %(vbd_ref)s uplug failed with "%(err)s", '
|
||||
'attempt %(num_attempt)d/%(max_attempts)d'),
|
||||
{'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
|
||||
'max_attempts': max_attempts, 'err': err})
|
||||
else:
|
||||
|
@ -2171,7 +2171,7 @@ def cleanup_attached_vdis(session):
|
|||
if 'nova_instance_uuid' in vdi_rec['other_config']:
|
||||
# Belongs to an instance and probably left over after an
|
||||
# unclean restart
|
||||
LOG.info(_('Disconnecting stale VDI %s from compute domU'),
|
||||
LOG.info(_LI('Disconnecting stale VDI %s from compute domU'),
|
||||
vdi_rec['uuid'])
|
||||
unplug_vbd(session, vbd_ref, this_vm_ref)
|
||||
destroy_vbd(session, vbd_ref)
|
||||
|
@ -2464,7 +2464,7 @@ def _mounted_processing(device, key, net, metadata):
|
|||
vfs = vfsimpl.VFSLocalFS(imgfile=None,
|
||||
imgfmt=None,
|
||||
imgdir=tmpdir)
|
||||
LOG.info(_('Manipulating interface files directly'))
|
||||
LOG.info(_LI('Manipulating interface files directly'))
|
||||
# for xenapi, we don't 'inject' admin_password here,
|
||||
# it's handled at instance startup time, nor do we
|
||||
# support injecting arbitrary files here.
|
||||
|
@ -2473,8 +2473,8 @@ def _mounted_processing(device, key, net, metadata):
|
|||
finally:
|
||||
utils.execute('umount', dev_path, run_as_root=True)
|
||||
else:
|
||||
LOG.info(_('Failed to mount filesystem (expected for '
|
||||
'non-linux instances): %s') % err)
|
||||
LOG.info(_LI('Failed to mount filesystem (expected for '
|
||||
'non-linux instances): %s'), err)
|
||||
|
||||
|
||||
def ensure_correct_host(session):
|
||||
|
|
|
@ -41,7 +41,7 @@ from nova.compute import vm_states
|
|||
from nova.console import type as ctype
|
||||
from nova import context as nova_context
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LE
|
||||
from nova.i18n import _, _LE, _LI
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.pci import manager as pci_manager
|
||||
|
@ -1210,8 +1210,8 @@ class VMOps(object):
|
|||
details = exc.details
|
||||
if (details[0] == 'VM_BAD_POWER_STATE' and
|
||||
details[-1] == 'halted'):
|
||||
LOG.info(_("Starting halted instance found during reboot"),
|
||||
instance=instance)
|
||||
LOG.info(_LI("Starting halted instance found during reboot"),
|
||||
instance=instance)
|
||||
self._start(instance, vm_ref=vm_ref,
|
||||
bad_volumes_callback=bad_volumes_callback)
|
||||
return
|
||||
|
@ -1414,7 +1414,7 @@ class VMOps(object):
|
|||
destroy_* methods are internal.
|
||||
|
||||
"""
|
||||
LOG.info(_("Destroying VM"), instance=instance)
|
||||
LOG.info(_LI("Destroying VM"), instance=instance)
|
||||
|
||||
# We don't use _get_vm_opaque_ref because the instance may
|
||||
# truly not exist because of a failure during build. A valid
|
||||
|
@ -1616,11 +1616,11 @@ class VMOps(object):
|
|||
timeout=timeout)
|
||||
|
||||
if instances_info["instance_count"] > 0:
|
||||
LOG.info(_("Found %(instance_count)d hung reboots "
|
||||
"older than %(timeout)d seconds") % instances_info)
|
||||
LOG.info(_LI("Found %(instance_count)d hung reboots "
|
||||
"older than %(timeout)d seconds") % instances_info)
|
||||
|
||||
for instance in instances:
|
||||
LOG.info(_("Automatically hard rebooting"), instance=instance)
|
||||
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
|
||||
self.compute_api.reboot(ctxt, instance, "HARD")
|
||||
|
||||
def get_info(self, instance, vm_ref=None):
|
||||
|
|
|
@ -20,7 +20,7 @@ Management class for Storage-related functions (attach, detach, etc).
|
|||
from oslo.utils import excutils
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _, _LI
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.virt.xenapi import vm_utils
|
||||
from nova.virt.xenapi import volume_utils
|
||||
|
@ -61,7 +61,7 @@ class VolumeOps(object):
|
|||
vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
|
||||
connection_data)
|
||||
vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
|
||||
LOG.info(_('Connected volume (vdi_uuid): %s'), vdi_uuid)
|
||||
LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)
|
||||
|
||||
if vm_ref:
|
||||
self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
|
||||
|
@ -124,8 +124,8 @@ class VolumeOps(object):
|
|||
LOG.debug("Plugging VBD: %s", vbd_ref)
|
||||
self._session.VBD.plug(vbd_ref, vm_ref)
|
||||
|
||||
LOG.info(_('Dev %(dev_number)s attached to'
|
||||
' instance %(instance_name)s'),
|
||||
LOG.info(_LI('Dev %(dev_number)s attached to'
|
||||
' instance %(instance_name)s'),
|
||||
{'instance_name': instance_name, 'dev_number': dev_number})
|
||||
|
||||
def detach_volume(self, connection_info, instance_name, mountpoint):
|
||||
|
@ -146,8 +146,8 @@ class VolumeOps(object):
|
|||
instance_name)
|
||||
else:
|
||||
self._detach_vbds_and_srs(vm_ref, [vbd_ref])
|
||||
LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
|
||||
' %(instance_name)s'),
|
||||
LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
|
||||
' %(instance_name)s'),
|
||||
{'instance_name': instance_name,
|
||||
'mountpoint': mountpoint})
|
||||
|
||||
|
|
Loading…
Reference in New Issue