Don't translate debug level logs in nova.virt

Our translation policy
(https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation) calls
for not translating debug level logs, so that translators can focus on
the higher-priority log messages. Furthermore, translation has a
performance overhead that is paid even when the message is never
emitted (since nova doesn't support lazy translation yet).

Change-Id: I524b48f530d8afd59a067074332e3964426e4d70
Author: Gary Kotton, 2014-04-30 05:50:21 -07:00
commit cfd0846498 (parent 4d7a65d9f1)
61 changed files with 644 additions and 654 deletions
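
The change itself is mechanical across all 61 files: drop the _() translation
wrapper from LOG.debug() calls and, where the old call eagerly %-formatted its
message, pass the arguments to the logger instead so interpolation is deferred.
A minimal sketch of the before/after pattern (the _ stub and the function name
are illustrative stand-ins, not nova code):

    import logging

    LOG = logging.getLogger(__name__)

    def _(msg):
        # Illustrative stand-in for nova.openstack.common.gettextutils._,
        # which performs an eager gettext lookup on every call.
        return msg

    def fetch_image(image_id):
        # Before: _() translates and the % operator formats the message
        # immediately, even when debug logging is disabled.
        LOG.debug(_("Fetching image %s") % image_id)

        # After: no translation, and %-interpolation is deferred by the
        # logging module until the record is actually emitted.
        LOG.debug("Fetching image %s", image_id)

The deferred form is also what the new hacking check (first hunk below) can
enforce syntactically, since every offending call starts with the literal
prefix LOG.debug(_(.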


@ -238,6 +238,7 @@ def no_translate_debug_logs(logical_line, filename):
"nova/storage",
"nova/tests",
"nova/vnc",
"nova/virt",
]
if max([name in filename for name in dirs]):
if logical_line.startswith("LOG.debug(_("):
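
For context, this first hunk extends an existing flake8/hacking check by
whitelisting "nova/virt". A self-contained approximation of how such a check
plugs into pep8/hacking; the function is truncated in the diff, so the body
below (including the N319 message, and any() in place of the max([...]) idiom
visible above) is a hedged reconstruction rather than the exact upstream code:

    def no_translate_debug_logs(logical_line, filename):
        """Flag translated debug logs such as LOG.debug(_("...")).

        pep8/hacking calls a registered check like this once per logical
        line; yielding an (offset, message) pair reports a violation.
        """
        dirs = [
            "nova/virt",
            # ...plus the other directories whitelisted in the hunk above
        ]
        if any(name in filename for name in dirs):
            if logical_line.startswith("LOG.debug(_("):
                yield 0, "N319: Don't translate debug level logs"

Gating on filename lets the rule be rolled out one directory at a time, which
is exactly what adding "nova/virt" to the whitelist does.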


@ -122,6 +122,6 @@ class IBootManager(base.PowerManager):
return self.state
def is_power_on(self):
LOG.debug(_("Checking if %s is running"), self.node_name)
LOG.debug("Checking if %s is running", self.node_name)
self._create_connection()
return self._get_relay(self.relay_id)


@ -134,7 +134,7 @@ class IPMI(base.PowerManager):
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
LOG.debug("ipmitool stdout: '%(out)s', stderr: '%(err)s'",
{'out': out, 'err': err})
return out, err
finally:


@ -107,7 +107,7 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
to the two phases of booting. This may be extended later.
"""
LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
LOG.debug("Building PXE config for deployment %s.", deployment_id)
network_config = None
if network_info and CONF.baremetal.pxe_network_config:
@ -245,8 +245,8 @@ class PXE(base.NodeDriver):
fileutils.ensure_tree(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
LOG.debug("Fetching kernel and ramdisk for instance %s",
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.cache_image(
@ -277,8 +277,8 @@ class PXE(base.NodeDriver):
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
LOG.debug("Fetching image %(ami)s for instance %(name)s",
{'ami': image_meta['id'], 'name': instance['name']})
bm_utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
@ -318,8 +318,8 @@ class PXE(base.NodeDriver):
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
LOG.debug("Injecting files into image for instance %(name)s",
{'name': instance['name']})
bm_utils.inject_into_image(
image=get_image_file_path(instance),


@ -128,8 +128,8 @@ class Tilera(base.NodeDriver):
fileutils.ensure_tree(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
LOG.debug("Fetching kernel and ramdisk for instance %s",
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.cache_image(
@ -159,8 +159,8 @@ class Tilera(base.NodeDriver):
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
LOG.debug("Fetching image %(ami)s for instance %(name)s",
{'ami': image_meta['id'], 'name': instance['name']})
bm_utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
@ -196,8 +196,8 @@ class Tilera(base.NodeDriver):
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
LOG.debug("Injecting files into image for instance %(name)s",
{'name': instance['name']})
bm_utils.inject_into_image(
image=get_image_file_path(instance),


@ -35,8 +35,8 @@ class BareMetalVIFDriver(object):
pass
def plug(self, instance, vif):
LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(vif)s")
% {'uuid': instance['uuid'], 'vif': vif})
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(vif)s",
{'uuid': instance['uuid'], 'vif': vif})
vif_uuid = vif['id']
ctx = context.get_admin_context()
node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
@ -47,8 +47,8 @@ class BareMetalVIFDriver(object):
for pif in pifs:
if not pif['vif_uuid']:
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
LOG.debug("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)",
{'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_plug(instance, vif, pif)
return
@ -60,15 +60,15 @@ class BareMetalVIFDriver(object):
% {'id': node['id'], 'vif_uuid': vif_uuid})
def unplug(self, instance, vif):
LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(vif)s"),
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(vif)s",
{'uuid': instance['uuid'], 'vif': vif})
vif_uuid = vif['id']
ctx = context.get_admin_context()
try:
pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
LOG.debug("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)",
{'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_unplug(instance, vif, pif)
except exception.NovaException:
LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)


@ -84,7 +84,7 @@ class VirtualPowerManager(base.PowerManager):
global _cmds
if _cmds is None:
LOG.debug(_("Setting up %s commands."),
LOG.debug("Setting up %s commands.",
CONF.baremetal.virtual_power_type)
_vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
CONF.baremetal.virtual_power_type
@ -132,13 +132,13 @@ class VirtualPowerManager(base.PowerManager):
self._connection = connection.ssh_connect(self.connection_data)
def _get_full_node_list(self):
LOG.debug(_("Getting full node list."))
LOG.debug("Getting full node list.")
cmd = self._vp_cmd.list_cmd
full_list = self._run_command(cmd)
return full_list
def _check_for_node(self):
LOG.debug(_("Looking up Name for Mac address %s."),
LOG.debug("Looking up Name for Mac address %s.",
self._mac_addresses)
self._matched_name = ''
full_node_list = self._get_full_node_list()
@ -190,7 +190,7 @@ class VirtualPowerManager(base.PowerManager):
return self.state
def is_power_on(self):
LOG.debug(_("Checking if %s is running"), self._node_name)
LOG.debug("Checking if %s is running", self._node_name)
if not self._check_for_node():
err_msg = _('Node "%(name)s" with MAC address %(mac)s not found.')
@ -227,7 +227,7 @@ class VirtualPowerManager(base.PowerManager):
stdout, stderr = processutils.ssh_execute(
self._connection, cmd, check_exit_code=check_exit_code)
result = stdout.strip().splitlines()
LOG.debug(_('Result for run_command: %s'), result)
LOG.debug('Result for run_command: %s', result)
except processutils.ProcessExecutionError:
result = []
LOG.exception(_("Error running command: %s"), cmd)


@ -93,7 +93,7 @@ class ConfigDriveBuilder(object):
def add_instance_metadata(self, instance_md):
for (path, value) in instance_md.metadata_for_config_drive():
self._add_file(path, value)
LOG.debug(_('Added %(filepath)s to config drive'),
LOG.debug('Added %(filepath)s to config drive',
{'filepath': path})
def _make_iso9660(self, path):


@ -125,8 +125,8 @@ def resize2fs(image, check_exit_code=False, run_as_root=False):
check_exit_code=[0, 1, 2],
run_as_root=run_as_root)
except processutils.ProcessExecutionError as exc:
LOG.debug(_("Checking the file system with e2fsck has failed, "
"the resize will be aborted. (%s)"), exc)
LOG.debug("Checking the file system with e2fsck has failed, "
"the resize will be aborted. (%s)", exc)
else:
utils.execute('resize2fs',
image,
@ -159,8 +159,8 @@ def extend(image, size, use_cow=False):
try:
resize2fs(dev, run_as_root=run_as_root, check_exit_code=[0])
except processutils.ProcessExecutionError as exc:
LOG.debug(_("Resizing the file system with resize2fs "
"has failed with error: %s"), exc)
LOG.debug("Resizing the file system with resize2fs "
"has failed with error: %s", exc)
finally:
finally_call()
@ -181,13 +181,13 @@ def extend(image, size, use_cow=False):
def can_resize_image(image, size):
"""Check whether we can resize the container image file."""
LOG.debug(_('Checking if we can resize image %(image)s. '
'size=%(size)s'), {'image': image, 'size': size})
LOG.debug('Checking if we can resize image %(image)s. '
'size=%(size)s', {'image': image, 'size': size})
# Check that we're increasing the size
virt_size = get_disk_size(image)
if virt_size >= size:
LOG.debug(_('Cannot resize image %s to a smaller size.'),
LOG.debug('Cannot resize image %s to a smaller size.',
image)
return False
return True
@ -195,8 +195,8 @@ def can_resize_image(image, size):
def is_image_partitionless(image, use_cow=False):
"""Check whether we can resize contained file system."""
LOG.debug(_('Checking if we can resize filesystem inside %(image)s. '
'CoW=%(use_cow)s'), {'image': image, 'use_cow': use_cow})
LOG.debug('Checking if we can resize filesystem inside %(image)s. '
'CoW=%(use_cow)s', {'image': image, 'use_cow': use_cow})
# Check the image is unpartitioned
if use_cow:
@ -205,8 +205,8 @@ def is_image_partitionless(image, use_cow=False):
fs.setup()
fs.teardown()
except exception.NovaException as e:
LOG.debug(_('Unable to mount image %(image)s with '
'error %(error)s. Cannot resize.'),
LOG.debug('Unable to mount image %(image)s with '
'error %(error)s. Cannot resize.',
{'image': image,
'error': e})
return False
@ -215,8 +215,8 @@ def is_image_partitionless(image, use_cow=False):
try:
utils.execute('e2label', image)
except processutils.ProcessExecutionError as e:
LOG.debug(_('Unable to determine label for image %(image)s with '
'error %(error)s. Cannot resize.'),
LOG.debug('Unable to determine label for image %(image)s with '
'error %(error)s. Cannot resize.',
{'image': image,
'error': e})
return False
@ -338,9 +338,9 @@ def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
Returns True if all requested operations completed without issue.
Raises an exception if a mandatory item can't be injected.
"""
LOG.debug(_("Inject data image=%(image)s key=%(key)s net=%(net)s "
"metadata=%(metadata)s admin_password=<SANITIZED> "
"files=%(files)s partition=%(partition)s use_cow=%(use_cow)s"),
LOG.debug("Inject data image=%(image)s key=%(key)s net=%(net)s "
"metadata=%(metadata)s admin_password=<SANITIZED> "
"files=%(files)s partition=%(partition)s use_cow=%(use_cow)s",
{'image': image, 'key': key, 'net': net, 'metadata': metadata,
'files': files, 'partition': partition, 'use_cow': use_cow})
fmt = "raw"
@ -400,11 +400,11 @@ def teardown_container(container_dir, container_root_device=None):
# Make sure container_root_device is released when teardown container.
if container_root_device:
if 'loop' in container_root_device:
LOG.debug(_("Release loop device %s"), container_root_device)
LOG.debug("Release loop device %s", container_root_device)
utils.execute('losetup', '--detach', container_root_device,
run_as_root=True, attempts=3)
else:
LOG.debug(_('Release nbd device %s'), container_root_device)
LOG.debug('Release nbd device %s', container_root_device)
utils.execute('qemu-nbd', '-d', container_root_device,
run_as_root=True)
except Exception as exn:
@ -465,7 +465,7 @@ def _inject_files_into_fs(files, fs):
def _inject_file_into_fs(fs, path, contents, append=False):
LOG.debug(_("Inject file fs=%(fs)s path=%(path)s append=%(append)s"),
LOG.debug("Inject file fs=%(fs)s path=%(path)s append=%(append)s",
{'fs': fs, 'path': path, 'append': append})
if append:
fs.append_file(path, contents)
@ -474,7 +474,7 @@ def _inject_file_into_fs(fs, path, contents, append=False):
def _inject_metadata_into_fs(metadata, fs):
LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s"),
LOG.debug("Inject metadata fs=%(fs)s metadata=%(metadata)s",
{'fs': fs, 'metadata': metadata})
_inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata))
@ -514,7 +514,7 @@ def _inject_key_into_fs(key, fs):
fs is the path to the base of the filesystem into which to inject the key.
"""
LOG.debug(_("Inject key fs=%(fs)s key=%(key)s"), {'fs': fs, 'key': key})
LOG.debug("Inject key fs=%(fs)s key=%(key)s", {'fs': fs, 'key': key})
sshdir = os.path.join('root', '.ssh')
fs.make_path(sshdir)
fs.set_ownership(sshdir, "root", "root")
@ -542,7 +542,7 @@ def _inject_net_into_fs(net, fs):
net is the contents of /etc/network/interfaces.
"""
LOG.debug(_("Inject key fs=%(fs)s net=%(net)s"), {'fs': fs, 'net': net})
LOG.debug("Inject key fs=%(fs)s net=%(net)s", {'fs': fs, 'net': net})
netdir = os.path.join('etc', 'network')
fs.make_path(netdir)
fs.set_ownership(netdir, "root", "root")
@ -567,8 +567,8 @@ def _inject_admin_password_into_fs(admin_passwd, fs):
# files from the instance filesystem to local files, make any
# necessary changes, and then copy them back.
LOG.debug(_("Inject admin password fs=%(fs)s "
"admin_passwd=<SANITIZED>"), {'fs': fs})
LOG.debug("Inject admin password fs=%(fs)s "
"admin_passwd=<SANITIZED>", {'fs': fs})
admin_user = 'root'
passwd_path = os.path.join('etc', 'passwd')


@ -37,36 +37,36 @@ class Mount(object):
@staticmethod
def instance_for_format(imgfile, mountdir, partition, imgfmt):
LOG.debug(_("Instance for format imgfile=%(imgfile)s "
"mountdir=%(mountdir)s partition=%(partition)s "
"imgfmt=%(imgfmt)s"),
LOG.debug("Instance for format imgfile=%(imgfile)s "
"mountdir=%(mountdir)s partition=%(partition)s "
"imgfmt=%(imgfmt)s",
{'imgfile': imgfile, 'mountdir': mountdir,
'partition': partition, 'imgfmt': imgfmt})
if imgfmt == "raw":
LOG.debug(_("Using LoopMount"))
LOG.debug("Using LoopMount")
return importutils.import_object(
"nova.virt.disk.mount.loop.LoopMount",
imgfile, mountdir, partition)
else:
LOG.debug(_("Using NbdMount"))
LOG.debug("Using NbdMount")
return importutils.import_object(
"nova.virt.disk.mount.nbd.NbdMount",
imgfile, mountdir, partition)
@staticmethod
def instance_for_device(imgfile, mountdir, partition, device):
LOG.debug(_("Instance for device imgfile=%(imgfile)s "
"mountdir=%(mountdir)s partition=%(partition)s "
"device=%(device)s"),
LOG.debug("Instance for device imgfile=%(imgfile)s "
"mountdir=%(mountdir)s partition=%(partition)s "
"device=%(device)s",
{'imgfile': imgfile, 'mountdir': mountdir,
'partition': partition, 'device': device})
if "loop" in device:
LOG.debug(_("Using LoopMount"))
LOG.debug("Using LoopMount")
return importutils.import_object(
"nova.virt.disk.mount.loop.LoopMount",
imgfile, mountdir, partition, device)
else:
LOG.debug(_("Using NbdMount"))
LOG.debug("Using NbdMount")
return importutils.import_object(
"nova.virt.disk.mount.nbd.NbdMount",
imgfile, mountdir, partition, device)
@ -135,7 +135,7 @@ class Mount(object):
def map_dev(self):
"""Map partitions of the device to the file system namespace."""
assert(os.path.exists(self.device))
LOG.debug(_("Map dev %s"), self.device)
LOG.debug("Map dev %s", self.device)
automapped_path = '/dev/%sp%s' % (os.path.basename(self.device),
self.partition)
@ -179,7 +179,7 @@ class Mount(object):
"""Remove partitions of the device from the file system namespace."""
if not self.mapped:
return
LOG.debug(_("Unmap dev %s"), self.device)
LOG.debug("Unmap dev %s", self.device)
if self.partition and not self.automapped:
utils.execute('kpartx', '-d', self.device, run_as_root=True)
self.mapped = False
@ -187,7 +187,7 @@ class Mount(object):
def mnt_dev(self):
"""Mount the device into the file system."""
LOG.debug(_("Mount %(dev)s on %(dir)s") %
LOG.debug("Mount %(dev)s on %(dir)s",
{'dev': self.mapped_device, 'dir': self.mount_dir})
_out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
discard_warnings=True, run_as_root=True)
@ -204,7 +204,7 @@ class Mount(object):
if not self.mounted:
return
self.flush_dev()
LOG.debug(_("Umount %s") % self.mapped_device)
LOG.debug("Umount %s", self.mapped_device)
utils.execute('umount', self.mapped_device, run_as_root=True)
self.mounted = False
@ -218,7 +218,7 @@ class Mount(object):
status = self.get_dev() and self.map_dev() and self.mnt_dev()
finally:
if not status:
LOG.debug(_("Fail to mount, tearing back down"))
LOG.debug("Fail to mount, tearing back down")
self.do_teardown()
return status


@ -36,7 +36,7 @@ class LoopMount(api.Mount):
return False
self.device = out.strip()
LOG.debug(_("Got loop device %s"), self.device)
LOG.debug("Got loop device %s", self.device)
self.linked = True
return True
@ -54,7 +54,7 @@ class LoopMount(api.Mount):
# NOTE(mikal): On some kernels, losetup -d will intermittently fail,
# thus leaking a loop device unless the losetup --detach is retried:
# https://lkml.org/lkml/2012/9/28/62
LOG.debug(_("Release loop device %s"), self.device)
LOG.debug("Release loop device %s", self.device)
utils.execute('losetup', '--detach', self.device, run_as_root=True,
attempts=3)
self.linked = False


@ -82,7 +82,7 @@ class NbdMount(api.Mount):
# NOTE(mikal): qemu-nbd will return an error if the device file is
# already in use.
LOG.debug(_('Get nbd device %(dev)s for %(imgfile)s'),
LOG.debug('Get nbd device %(dev)s for %(imgfile)s',
{'dev': device, 'imgfile': self.image})
_out, err = utils.trycmd('qemu-nbd', '-c', device, self.image,
run_as_root=True)
@ -122,7 +122,7 @@ class NbdMount(api.Mount):
def unget_dev(self):
if not self.linked:
return
LOG.debug(_('Release nbd device %s'), self.device)
LOG.debug('Release nbd device %s', self.device)
utils.execute('qemu-nbd', '-d', self.device, run_as_root=True)
self.linked = False
self.device = None


@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@ -23,25 +22,25 @@ class VFS(object):
@staticmethod
def instance_for_image(imgfile, imgfmt, partition):
LOG.debug(_("Instance for image imgfile=%(imgfile)s "
"imgfmt=%(imgfmt)s partition=%(partition)s"),
LOG.debug("Instance for image imgfile=%(imgfile)s "
"imgfmt=%(imgfmt)s partition=%(partition)s",
{'imgfile': imgfile, 'imgfmt': imgfmt,
'partition': partition})
hasGuestfs = False
try:
LOG.debug(_("Trying to import guestfs"))
LOG.debug("Trying to import guestfs")
importutils.import_module("guestfs")
hasGuestfs = True
except Exception:
pass
if hasGuestfs:
LOG.debug(_("Using primary VFSGuestFS"))
LOG.debug("Using primary VFSGuestFS")
return importutils.import_object(
"nova.virt.disk.vfs.guestfs.VFSGuestFS",
imgfile, imgfmt, partition)
else:
LOG.debug(_("Falling back to VFSLocalFS"))
LOG.debug("Falling back to VFSLocalFS")
return importutils.import_object(
"nova.virt.disk.vfs.localfs.VFSLocalFS",
imgfile, imgfmt, partition)


@ -48,7 +48,7 @@ class VFSGuestFS(vfs.VFS):
self.setup_os_static()
def setup_os_static(self):
LOG.debug(_("Mount guest OS image %(imgfile)s partition %(part)s"),
LOG.debug("Mount guest OS image %(imgfile)s partition %(part)s",
{'imgfile': self.imgfile, 'part': str(self.partition)})
if self.partition:
@ -57,7 +57,7 @@ class VFSGuestFS(vfs.VFS):
self.handle.mount_options("", "/dev/sda", "/")
def setup_os_inspect(self):
LOG.debug(_("Inspecting guest OS image %s"), self.imgfile)
LOG.debug("Inspecting guest OS image %s", self.imgfile)
roots = self.handle.inspect_os()
if len(roots) == 0:
@ -65,7 +65,7 @@ class VFSGuestFS(vfs.VFS):
% self.imgfile)
if len(roots) != 1:
LOG.debug(_("Multi-boot OS %(roots)s") % {'roots': str(roots)})
LOG.debug("Multi-boot OS %(roots)s", {'roots': str(roots)})
raise exception.NovaException(
_("Multi-boot operating system found in %s") %
self.imgfile)
@ -73,7 +73,7 @@ class VFSGuestFS(vfs.VFS):
self.setup_os_root(roots[0])
def setup_os_root(self, root):
LOG.debug(_("Inspecting guest OS root filesystem %s"), root)
LOG.debug("Inspecting guest OS root filesystem %s", root)
mounts = self.handle.inspect_get_mountpoints(root)
if len(mounts) == 0:
@ -86,7 +86,7 @@ class VFSGuestFS(vfs.VFS):
root_mounted = False
for mount in mounts:
LOG.debug(_("Mounting %(dev)s at %(dir)s") %
LOG.debug("Mounting %(dev)s at %(dir)s",
{'dev': mount[1], 'dir': mount[0]})
try:
self.handle.mount_options("", mount[1], mount[0])
@ -102,7 +102,7 @@ class VFSGuestFS(vfs.VFS):
raise exception.NovaException(msg)
def setup(self):
LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") %
LOG.debug("Setting up appliance for %(imgfile)s %(imgfmt)s",
{'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
try:
self.handle = tpool.Proxy(guestfs.GuestFS(close_on_exit=False))
@ -138,7 +138,7 @@ class VFSGuestFS(vfs.VFS):
raise
def teardown(self):
LOG.debug(_("Tearing down appliance"))
LOG.debug("Tearing down appliance")
try:
try:
@ -172,27 +172,27 @@ class VFSGuestFS(vfs.VFS):
return path
def make_path(self, path):
LOG.debug(_("Make directory path=%s"), path)
LOG.debug("Make directory path=%s", path)
path = self._canonicalize_path(path)
self.handle.mkdir_p(path)
def append_file(self, path, content):
LOG.debug(_("Append file path=%s"), path)
LOG.debug("Append file path=%s", path)
path = self._canonicalize_path(path)
self.handle.write_append(path, content)
def replace_file(self, path, content):
LOG.debug(_("Replace file path=%s"), path)
LOG.debug("Replace file path=%s", path)
path = self._canonicalize_path(path)
self.handle.write(path, content)
def read_file(self, path):
LOG.debug(_("Read file path=%s"), path)
LOG.debug("Read file path=%s", path)
path = self._canonicalize_path(path)
return self.handle.read_file(path)
def has_file(self, path):
LOG.debug(_("Has file path=%s"), path)
LOG.debug("Has file path=%s", path)
path = self._canonicalize_path(path)
try:
self.handle.stat(path)
@ -201,14 +201,14 @@ class VFSGuestFS(vfs.VFS):
return False
def set_permissions(self, path, mode):
LOG.debug(_("Set permissions path=%(path)s mode=%(mode)s"),
LOG.debug("Set permissions path=%(path)s mode=%(mode)s",
{'path': path, 'mode': mode})
path = self._canonicalize_path(path)
self.handle.chmod(mode, path)
def set_ownership(self, path, user, group):
LOG.debug(_("Set ownership path=%(path)s "
"user=%(user)s group=%(group)s"),
LOG.debug("Set ownership path=%(path)s "
"user=%(user)s group=%(group)s",
{'path': path, 'user': user, 'group': group})
path = self._canonicalize_path(path)
uid = -1
@ -221,6 +221,6 @@ class VFSGuestFS(vfs.VFS):
gid = int(self.handle.aug_get(
"/files/etc/group/" + group + "/gid"))
LOG.debug(_("chown uid=%(uid)d gid=%(gid)s"),
LOG.debug("chown uid=%(uid)d gid=%(gid)s",
{'uid': uid, 'gid': gid})
self.handle.chown(uid, gid, path)


@ -63,12 +63,12 @@ class VFSLocalFS(vfs.VFS):
self.imgdir = tempfile.mkdtemp(prefix="openstack-vfs-localfs")
try:
if self.imgfmt == "raw":
LOG.debug(_("Using LoopMount"))
LOG.debug("Using LoopMount")
mount = loop.LoopMount(self.imgfile,
self.imgdir,
self.partition)
else:
LOG.debug(_("Using NbdMount"))
LOG.debug("Using NbdMount")
mount = nbd.NbdMount(self.imgfile,
self.imgdir,
self.partition)
@ -77,7 +77,7 @@ class VFSLocalFS(vfs.VFS):
self.mount = mount
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.debug(_("Failed to mount image %(ex)s)"), {'ex': str(e)})
LOG.debug("Failed to mount image %(ex)s)", {'ex': str(e)})
self.teardown()
def teardown(self):
@ -85,24 +85,24 @@ class VFSLocalFS(vfs.VFS):
if self.mount:
self.mount.do_teardown()
except Exception as e:
LOG.debug(_("Failed to unmount %(imgdir)s: %(ex)s") %
LOG.debug("Failed to unmount %(imgdir)s: %(ex)s",
{'imgdir': self.imgdir, 'ex': str(e)})
try:
if self.imgdir:
os.rmdir(self.imgdir)
except Exception as e:
LOG.debug(_("Failed to remove %(imgdir)s: %(ex)s") %
LOG.debug("Failed to remove %(imgdir)s: %(ex)s",
{'imgdir': self.imgdir, 'ex': str(e)})
self.imgdir = None
self.mount = None
def make_path(self, path):
LOG.debug(_("Make directory path=%s"), path)
LOG.debug("Make directory path=%s", path)
canonpath = self._canonical_path(path)
utils.execute('mkdir', '-p', canonpath, run_as_root=True)
def append_file(self, path, content):
LOG.debug(_("Append file path=%s"), path)
LOG.debug("Append file path=%s", path)
canonpath = self._canonical_path(path)
args = ["-a", canonpath]
@ -111,7 +111,7 @@ class VFSLocalFS(vfs.VFS):
utils.execute('tee', *args, **kwargs)
def replace_file(self, path, content):
LOG.debug(_("Replace file path=%s"), path)
LOG.debug("Replace file path=%s", path)
canonpath = self._canonical_path(path)
args = [canonpath]
@ -120,13 +120,13 @@ class VFSLocalFS(vfs.VFS):
utils.execute('tee', *args, **kwargs)
def read_file(self, path):
LOG.debug(_("Read file path=%s"), path)
LOG.debug("Read file path=%s", path)
canonpath = self._canonical_path(path)
return utils.read_file_as_root(canonpath)
def has_file(self, path):
LOG.debug(_("Has file path=%s"), path)
LOG.debug("Has file path=%s", path)
canonpath = self._canonical_path(path)
exists, _err = utils.trycmd('readlink', '-e',
canonpath,
@ -134,14 +134,14 @@ class VFSLocalFS(vfs.VFS):
return exists
def set_permissions(self, path, mode):
LOG.debug(_("Set permissions path=%(path)s mode=%(mode)o"),
LOG.debug("Set permissions path=%(path)s mode=%(mode)o",
{'path': path, 'mode': mode})
canonpath = self._canonical_path(path)
utils.execute('chmod', "%o" % mode, canonpath, run_as_root=True)
def set_ownership(self, path, user, group):
LOG.debug(_("Set permissions path=%(path)s "
"user=%(user)s group=%(group)s"),
LOG.debug("Set permissions path=%(path)s "
"user=%(user)s group=%(group)s",
{'path': path, 'user': user, 'group': group})
canonpath = self._canonical_path(path)
owner = None


@ -1199,7 +1199,7 @@ class ComputeDriver(object):
"""
if not self._compute_event_callback:
LOG.debug(_("Discarding event %s") % str(event))
LOG.debug("Discarding event %s", str(event))
return
if not isinstance(event, virtevent.Event):
@ -1207,7 +1207,7 @@ class ComputeDriver(object):
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug(_("Emitting event %s") % str(event))
LOG.debug("Emitting event %s", str(event))
self._compute_event_callback(event)
except Exception as ex:
LOG.error(_("Exception dispatching event %(event)s: %(ex)s"),


@ -183,9 +183,9 @@ class IptablesFirewallDriver(FirewallDriver):
self.network_infos[instance['id']] = network_info
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
LOG.debug(_('Filters added to instance'), instance=instance)
LOG.debug('Filters added to instance', instance=instance)
self.refresh_provider_fw_rules()
LOG.debug(_('Provider Firewall Rules refreshed'), instance=instance)
LOG.debug('Provider Firewall Rules refreshed', instance=instance)
# Ensure that DHCP request rule is updated if necessary
if (self.dhcp_create and not self.dhcp_created):
self.iptables.ipv4['filter'].add_rule(
@ -367,7 +367,7 @@ class IptablesFirewallDriver(FirewallDriver):
rules = rules_cls.get_by_security_group(ctxt, security_group)
for rule in rules:
LOG.debug(_('Adding security group rule: %r'), rule,
LOG.debug('Adding security group rule: %r', rule,
instance=instance)
if not rule['cidr']:
@ -498,7 +498,7 @@ class IptablesFirewallDriver(FirewallDriver):
ipv6_rules = []
rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
LOG.debug('Adding provider rule: %s', rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules


@ -88,7 +88,7 @@ class BaseVolumeUtils(object):
for ephemeral in
driver.block_device_info_get_ephemerals(block_device_info)]
LOG.debug(_("block_device_list %s"), block_device_list)
LOG.debug("block_device_list %s", block_device_list)
return block_device.strip_dev(mount_device) in block_device_list
def _get_drive_number_from_disk_path(self, disk_path):


@ -171,11 +171,11 @@ class HyperVDriver(driver.ComputeDriver):
raise NotImplementedError(msg)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
LOG.debug(_("ensure_filtering_rules_for_instance called"),
LOG.debug("ensure_filtering_rules_for_instance called",
instance=instance_ref)
def unfilter_instance(self, instance, network_info):
LOG.debug(_("unfilter_instance called"), instance=instance)
LOG.debug("unfilter_instance called", instance=instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,


@ -21,7 +21,6 @@ import platform
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
@ -89,7 +88,7 @@ class HostOps(object):
:returns: hypervisor version (ex. 12003)
"""
version = self._hostutils.get_windows_version().replace('.', '')
LOG.debug(_('Windows version: %s ') % version)
LOG.debug('Windows version: %s ', version)
return version
def get_available_resource(self):
@ -101,7 +100,7 @@ class HostOps(object):
:returns: dictionary describing resources
"""
LOG.debug(_('get_available_resource called'))
LOG.debug('get_available_resource called')
(total_mem_mb,
free_mem_mb,
@ -135,7 +134,7 @@ class HostOps(object):
return dic
def _update_stats(self):
LOG.debug(_("Updating host stats"))
LOG.debug("Updating host stats")
(total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info()
(total_hdd_gb,
@ -161,7 +160,7 @@ class HostOps(object):
If 'refresh' is True, run the update first.
"""
LOG.debug(_("get_host_stats called"))
LOG.debug("get_host_stats called")
if refresh or not self._stats:
self._update_stats()
@ -176,5 +175,5 @@ class HostOps(object):
if not host_ip:
# Return the first available address
host_ip = self._hostutils.get_local_ips()[0]
LOG.debug(_("Host IP address is: %s"), host_ip)
LOG.debug("Host IP address is: %s", host_ip)
return host_ip


@ -77,13 +77,13 @@ class ImageCache(object):
def copy_and_resize_vhd():
if not self._pathutils.exists(resized_vhd_path):
try:
LOG.debug(_("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s"),
LOG.debug("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s",
{'vhd_path': vhd_path,
'resized_vhd_path': resized_vhd_path})
self._pathutils.copyfile(vhd_path, resized_vhd_path)
LOG.debug(_("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s"),
LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s",
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,


@ -59,7 +59,7 @@ class LiveMigrationOps(object):
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
LOG.debug(_("live_migration called"), instance=instance_ref)
LOG.debug("live_migration called", instance=instance_ref)
instance_name = instance_ref["name"]
try:
@ -69,18 +69,18 @@ class LiveMigrationOps(object):
self._volumeops.logout_storage_target(target_iqn)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug(_("Calling live migration recover_method "
"for instance: %s"), instance_name)
LOG.debug("Calling live migration recover_method "
"for instance: %s", instance_name)
recover_method(context, instance_ref, dest, block_migration)
LOG.debug(_("Calling live migration post_method for instance: %s"),
LOG.debug("Calling live migration post_method for instance: %s",
instance_name)
post_method(context, instance_ref, dest, block_migration)
@check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug(_("pre_live_migration called"), instance=instance)
LOG.debug("pre_live_migration called", instance=instance)
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
@ -94,7 +94,7 @@ class LiveMigrationOps(object):
@check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug(_("post_live_migration_at_destination called"),
LOG.debug("post_live_migration_at_destination called",
instance=instance_ref)
@check_os_version_requirement
@ -102,16 +102,16 @@ class LiveMigrationOps(object):
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
LOG.debug(_("check_can_live_migrate_destination called"), instance_ref)
LOG.debug("check_can_live_migrate_destination called", instance_ref)
return {}
@check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug(_("check_can_live_migrate_destination_cleanup called"))
LOG.debug("check_can_live_migrate_destination_cleanup called")
@check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug(_("check_can_live_migrate_source called"), instance_ref)
LOG.debug("check_can_live_migrate_source called", instance_ref)
return dest_check_data


@ -74,7 +74,7 @@ class LiveMigrationUtils(object):
return vms[0]
def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
LOG.debug(_("Destroying existing remote planned VM: %s"),
LOG.debug("Destroying existing remote planned VM: %s",
planned_vm.ElementName)
vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
@ -96,7 +96,7 @@ class LiveMigrationUtils(object):
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
migration_setting_data = vsmsd.GetText_(1)
LOG.debug(_("Creating remote planned VM for VM: %s"),
LOG.debug("Creating remote planned VM for VM: %s",
vm.ElementName)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
@ -153,16 +153,16 @@ class LiveMigrationUtils(object):
old_disk_path = sasd.HostResource[0]
new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
LOG.debug(_("Replacing host resource "
"%(old_disk_path)s with "
"%(new_disk_path)s on planned VM %(vm_name)s"),
LOG.debug("Replacing host resource "
"%(old_disk_path)s with "
"%(new_disk_path)s on planned VM %(vm_name)s",
{'old_disk_path': old_disk_path,
'new_disk_path': new_disk_path,
'vm_name': vm_name})
sasd.HostResource = [new_disk_path]
updated_resource_setting_data.append(sasd.GetText_(1))
LOG.debug(_("Updating remote planned VM disk paths for VM: %s"),
LOG.debug("Updating remote planned VM disk paths for VM: %s",
vm_name)
vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
(res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
@ -198,7 +198,7 @@ class LiveMigrationUtils(object):
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
LOG.debug(_("Starting live migration for VM: %s"), vm.ElementName)
LOG.debug("Starting live migration for VM: %s", vm.ElementName)
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
@ -207,7 +207,7 @@ class LiveMigrationUtils(object):
self._vmutils.check_ret_val(ret_val, job_path)
def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
LOG.debug(_("Getting live migration networks for remote host: %s"),
LOG.debug("Getting live migration networks for remote host: %s",
dest_host)
migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
return migr_svc_rmt.MigrationServiceListenerIPAddressList


@ -47,9 +47,9 @@ class MigrationOps(object):
same_host = False
if dest in self._hostutils.get_local_ips():
same_host = True
LOG.debug(_("Migration target is the source host"))
LOG.debug("Migration target is the source host")
else:
LOG.debug(_("Migration target host: %s") % dest)
LOG.debug("Migration target host: %s", dest)
instance_path = self._pathutils.get_instance_dir(instance_name)
revert_path = self._pathutils.get_instance_migr_revert_dir(
@ -70,8 +70,8 @@ class MigrationOps(object):
for disk_file in disk_files:
# Skip the config drive as the instance is already configured
if os.path.basename(disk_file).lower() != 'configdrive.vhd':
LOG.debug(_('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"'),
LOG.debug('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"',
{'disk_file': disk_file, 'dest_path': dest_path})
self._pathutils.copy(disk_file, dest_path)
@ -110,7 +110,7 @@ class MigrationOps(object):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None):
LOG.debug(_("migrate_disk_and_power_off called"), instance=instance)
LOG.debug("migrate_disk_and_power_off called", instance=instance)
self._check_target_flavor(instance, flavor)
@ -130,7 +130,7 @@ class MigrationOps(object):
return ""
def confirm_migration(self, migration, instance, network_info):
LOG.debug(_("confirm_migration called"), instance=instance)
LOG.debug("confirm_migration called", instance=instance)
self._pathutils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
@ -145,7 +145,7 @@ class MigrationOps(object):
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("finish_revert_migration called"), instance=instance)
LOG.debug("finish_revert_migration called", instance=instance)
instance_name = instance['name']
self._revert_migration_files(instance_name)
@ -167,22 +167,22 @@ class MigrationOps(object):
base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
os.path.basename(base_vhd_path))
try:
LOG.debug(_('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s'),
LOG.debug('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s',
{'base_vhd_path': base_vhd_path,
'base_vhd_copy_path': base_vhd_copy_path})
self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
LOG.debug(_("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s"),
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_copy_path)
LOG.debug(_("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s"),
LOG.debug("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
@ -204,14 +204,14 @@ class MigrationOps(object):
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug(_("Getting parent disk info for disk: %s"), vhd_path)
LOG.debug("Getting parent disk info for disk: %s", vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug(_("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s"),
LOG.debug("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s",
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
@ -222,9 +222,9 @@ class MigrationOps(object):
# If the location of the base host differs between source
# and target hosts we need to reconnect the base disk
if src_base_disk_path.lower() != base_vhd_path.lower():
LOG.debug(_("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s"),
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
@ -233,7 +233,7 @@ class MigrationOps(object):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
LOG.debug(_("finish_migration called"), instance=instance)
LOG.debug("finish_migration called", instance=instance)
instance_name = instance['name']


@ -90,12 +90,12 @@ class PathUtils(object):
def _check_create_dir(self, path):
if not self.exists(path):
LOG.debug(_('Creating directory: %s') % path)
LOG.debug('Creating directory: %s', path)
self.makedirs(path)
def _check_remove_dir(self, path):
if self.exists(path):
LOG.debug(_('Removing directory: %s') % path)
LOG.debug('Removing directory: %s', path)
self.rmtree(path)
def _get_instances_sub_dir(self, dir_name, remote_server=None,


@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import hostops
from nova.virt.hyperv import utilsfactory
@ -28,12 +27,12 @@ class RDPConsoleOps(object):
self._rdpconsoleutils = utilsfactory.get_rdpconsoleutils()
def get_rdp_console(self, instance):
LOG.debug(_("get_rdp_console called"), instance=instance)
LOG.debug("get_rdp_console called", instance=instance)
host = self._hostops.get_host_ip_addr()
port = self._rdpconsoleutils.get_rdp_console_port()
vm_id = self._vmutils.get_vm_id(instance['name'])
LOG.debug(_("RDP console: %(host)s:%(port)s, %(vm_id)s") %
LOG.debug("RDP console: %(host)s:%(port)s, %(vm_id)s",
{"host": host, "port": port, "vm_id": vm_id})
return {'host': host,


@ -50,7 +50,7 @@ class SnapshotOps(object):
"""Create snapshot from a running VM instance."""
instance_name = instance["name"]
LOG.debug(_("Creating snapshot for instance %s"), instance_name)
LOG.debug("Creating snapshot for instance %s", instance_name)
snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
@ -59,7 +59,7 @@ class SnapshotOps(object):
try:
src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
LOG.debug(_("Getting info for VHD %s"), src_vhd_path)
LOG.debug("Getting info for VHD %s", src_vhd_path)
src_base_disk_path = self._vhdutils.get_vhd_parent_path(
src_vhd_path)
@ -67,7 +67,7 @@ class SnapshotOps(object):
dest_vhd_path = os.path.join(export_dir, os.path.basename(
src_vhd_path))
LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s',
{'src_vhd_path': src_vhd_path,
'dest_vhd_path': dest_vhd_path})
self._pathutils.copyfile(src_vhd_path, dest_vhd_path)
@ -78,46 +78,46 @@ class SnapshotOps(object):
else:
basename = os.path.basename(src_base_disk_path)
dest_base_disk_path = os.path.join(export_dir, basename)
LOG.debug(_('Copying base disk %(src_vhd_path)s to '
'%(dest_base_disk_path)s'),
LOG.debug('Copying base disk %(src_vhd_path)s to '
'%(dest_base_disk_path)s',
{'src_vhd_path': src_vhd_path,
'dest_base_disk_path': dest_base_disk_path})
self._pathutils.copyfile(src_base_disk_path,
dest_base_disk_path)
LOG.debug(_("Reconnecting copied base VHD "
"%(dest_base_disk_path)s and diff "
"VHD %(dest_vhd_path)s"),
LOG.debug("Reconnecting copied base VHD "
"%(dest_base_disk_path)s and diff "
"VHD %(dest_vhd_path)s",
{'dest_base_disk_path': dest_base_disk_path,
'dest_vhd_path': dest_vhd_path})
self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
dest_base_disk_path)
LOG.debug(_("Merging base disk %(dest_base_disk_path)s and "
"diff disk %(dest_vhd_path)s"),
LOG.debug("Merging base disk %(dest_base_disk_path)s and "
"diff disk %(dest_vhd_path)s",
{'dest_base_disk_path': dest_base_disk_path,
'dest_vhd_path': dest_vhd_path})
self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
image_vhd_path = dest_base_disk_path
LOG.debug(_("Updating Glance image %(name)s with content from "
"merged disk %(image_vhd_path)s"),
LOG.debug("Updating Glance image %(name)s with content from "
"merged disk %(image_vhd_path)s",
{'name': name, 'image_vhd_path': image_vhd_path})
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._save_glance_image(context, name, image_vhd_path)
LOG.debug(_("Snapshot image %(name)s updated for VM "
"%(instance_name)s"),
LOG.debug("Snapshot image %(name)s updated for VM "
"%(instance_name)s",
{'name': name, 'instance_name': instance_name})
finally:
try:
LOG.debug(_("Removing snapshot %s"), name)
LOG.debug("Removing snapshot %s", name)
self._vmutils.remove_vm_snapshot(snapshot_path)
except Exception as ex:
LOG.exception(ex)
LOG.warning(_('Failed to remove snapshot for VM %s')
% instance_name)
if export_dir:
LOG.debug(_('Removing directory: %s'), export_dir)
LOG.debug('Removing directory: %s', export_dir)
self._pathutils.rmtree(export_dir)


@ -18,7 +18,6 @@ import abc
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import utilsfactory
@ -69,7 +68,7 @@ class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
CONF.hyperv.vswitch_name)
vm_name = instance['name']
LOG.debug(_('Creating vswitch port for instance: %s') % vm_name)
LOG.debug('Creating vswitch port for instance: %s', vm_name)
if self._netutils.vswitch_port_needed():
vswitch_data = self._netutils.create_vswitch_port(vswitch_path,
vm_name)


@ -119,7 +119,7 @@ class VMOps(object):
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
LOG.debug("get_info called for instance", instance=instance)
instance_name = instance['name']
if not self._vmutils.vm_exists(instance_name):
@ -142,16 +142,16 @@ class VMOps(object):
try:
if CONF.use_cow_images:
LOG.debug(_("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s"),
LOG.debug("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
else:
LOG.debug(_("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s"),
LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
@ -173,8 +173,8 @@ class VMOps(object):
'root_vhd_size': root_vhd_internal_size}
raise vmutils.HyperVException(error_msg)
elif root_vhd_internal_size > base_vhd_size:
LOG.debug(_("Resizing VHD %(root_vhd_path)s to new "
"size %(root_vhd_size)s"),
LOG.debug("Resizing VHD %(root_vhd_path)s to new "
"size %(root_vhd_size)s",
{'root_vhd_size': root_vhd_internal_size,
'root_vhd_path': root_vhd_path},
instance=instance)
@ -265,7 +265,7 @@ class VMOps(object):
root_vhd_path is None)
for vif in network_info:
LOG.debug(_('Creating nic for instance'), instance=instance)
LOG.debug('Creating nic for instance', instance=instance)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
@ -350,7 +350,7 @@ class VMOps(object):
self._vmutils.destroy_vm(instance_name)
self._disconnect_volumes(volume_drives)
else:
LOG.debug(_("Instance not found"), instance=instance)
LOG.debug("Instance not found", instance=instance)
if destroy_disks:
self._delete_disk_files(instance_name)
@ -361,51 +361,51 @@ class VMOps(object):
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug(_("Rebooting instance"), instance=instance)
LOG.debug("Rebooting instance", instance=instance)
self._set_vm_state(instance['name'],
constants.HYPERV_VM_STATE_REBOOT)
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug(_("Suspend instance"), instance=instance)
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
LOG.debug("Power off instance", instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_DISABLED)
def power_on(self, instance):
"""Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
LOG.debug("Power on instance", instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, vm_name, req_state):
try:
self._vmutils.set_vm_state(vm_name, req_state)
LOG.debug(_("Successfully changed state of VM %(vm_name)s"
" to: %(req_state)s"),
LOG.debug("Successfully changed state of VM %(vm_name)s"
" to: %(req_state)s",
{'vm_name': vm_name, 'req_state': req_state})
except Exception:
with excutils.save_and_reraise_exception():


@ -220,15 +220,15 @@ class VMUtils(object):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug(_('Creating VM %s'), vm_name)
LOG.debug('Creating VM %s', vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug(_('Setting memory for vm %s'), vm_name)
LOG.debug('Setting memory for vm %s', vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
LOG.debug('Set vCPUs for vm %s', vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name):
@ -389,8 +389,8 @@ class VMUtils(object):
#Invalid state for current operation (32775) typically means that
#the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug(_("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s"),
LOG.debug("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s",
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
@ -477,7 +477,7 @@ class VMUtils(object):
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"),
LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
{'desc': desc, 'elap': elap})
return job


@ -26,7 +26,6 @@ if sys.platform == 'win32':
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
@ -241,7 +240,7 @@ class VMUtilsV2(vmutils.VMUtils):
for metric_name in metric_names:
metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
if not metric_def:
LOG.debug(_("Metric not found: %s") % metric_name)
LOG.debug("Metric not found: %s", metric_name)
else:
self._enable_metrics(metric_svc, vm, metric_def[0].path_())


@ -90,15 +90,15 @@ class VolumeOps(object):
target_portal = data['target_portal']
# Check if we already logged in
if self._volutils.get_device_number_for_target(target_iqn, target_lun):
LOG.debug(_("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s"),
LOG.debug("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s",
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
else:
LOG.debug(_("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s"),
LOG.debug("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s",
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
self._volutils.login_storage_target(target_lun, target_iqn,
@ -111,7 +111,7 @@ class VolumeOps(object):
ebs_root is True
"""
target_iqn = None
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s"),
LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
try:
@ -158,13 +158,13 @@ class VolumeOps(object):
self.detach_volume(vol['connection_info'], instance_name)
def logout_storage_target(self, target_iqn):
LOG.debug(_("Logging off storage target %s"), target_iqn)
LOG.debug("Logging off storage target %s", target_iqn)
self._volutils.logout_storage_target(target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Detach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s"),
LOG.debug("Detach_volume: %(connection_info)s "
"from %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
@ -176,7 +176,7 @@ class VolumeOps(object):
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
LOG.debug(_("Detaching physical disk from instance: %s"),
LOG.debug("Detaching physical disk from instance: %s",
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
@ -204,9 +204,9 @@ class VolumeOps(object):
target_iqn, target_lun)
if device_number in (None, -1):
attempt = i + 1
LOG.debug(_('Attempt %d to get device_number '
LOG.debug('Attempt %d to get device_number '
'from get_device_number_for_target failed. '
'Retrying...') % attempt)
'Retrying...', attempt)
time.sleep(CONF.hyperv.mounted_disk_query_retry_interval)
else:
break
@ -214,8 +214,8 @@ class VolumeOps(object):
if device_number in (None, -1):
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s') % target_iqn)
LOG.debug(_('Device number: %(device_number)s, '
'target lun: %(target_lun)s'),
LOG.debug('Device number: %(device_number)s, '
'target lun: %(target_lun)s',
{'device_number': device_number, 'target_lun': target_lun})
#Finding Mounted disk drive
for i in range(0, CONF.hyperv.volume_attach_retry_count):


@ -94,9 +94,9 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
else:
return
except vmutils.HyperVException as exc:
LOG.debug(_("Attempt %(attempt)d to connect to target "
"%(target_iqn)s failed. Retrying. "
"Exceptipn: %(exc)s ") %
LOG.debug("Attempt %(attempt)d to connect to target "
"%(target_iqn)s failed. Retrying. "
"Exceptipn: %(exc)s ",
{'target_iqn': target_iqn,
'exc': exc,
'attempt': attempt})


@ -24,7 +24,6 @@ helpers for populating up config object instances.
"""
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
@ -68,7 +67,7 @@ class LibvirtConfigObject(object):
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, pretty_print=pretty_print)
LOG.debug(_("Generated XML %s "), (xml_str,))
LOG.debug("Generated XML %s ", (xml_str,))
return xml_str


@ -626,12 +626,12 @@ class LibvirtDriver(driver.ComputeDriver):
self._init_events_pipe()
LOG.debug(_("Starting native event thread"))
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug(_("Starting green dispatch thread"))
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _do_quality_warnings(self):
@ -673,7 +673,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
LOG.debug('Connecting to libvirt: %s', self.uri())
wrapped_conn = None
try:
@ -689,7 +689,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._wrapped_conn = wrapped_conn
try:
LOG.debug(_("Registering for lifecycle events %s"), self)
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
@ -700,7 +700,7 @@ class LibvirtDriver(driver.ComputeDriver):
{'uri': self.uri(), 'error': e})
try:
LOG.debug(_("Registering for connection events: %s") %
LOG.debug("Registering for connection events: %s",
str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
@ -710,8 +710,8 @@ class LibvirtDriver(driver.ComputeDriver):
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug(_("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s"), e)
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_("URI %(uri)s does not support connection"
" events: %(error)s"),
@ -744,7 +744,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
LOG.debug('Connection to libvirt broke')
return False
raise
@ -974,8 +974,8 @@ class LibvirtDriver(driver.ComputeDriver):
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
LOG.debug("Error from libvirt during undefineFlags."
" Retrying with undefine", instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
@ -1141,21 +1141,21 @@ class LibvirtDriver(driver.ComputeDriver):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
LOG.debug('Could not determine iscsi initiator name',
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
LOG.debug('Could not determine fibre channel '
'world wide node names',
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
LOG.debug('Could not determine fibre channel '
'world wide port names',
instance=instance)
connector = {'ip': CONF.my_ip,
@ -1767,7 +1767,7 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug(_("snap xml: %s") % snapshot_xml)
LOG.debug("snap xml: %s", snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
@ -1815,7 +1815,7 @@ class LibvirtDriver(driver.ComputeDriver):
the snapshot is complete
"""
LOG.debug(_("volume_snapshot_create: create_info: %(c_info)s"),
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
@ -1891,7 +1891,7 @@ class LibvirtDriver(driver.ComputeDriver):
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
@ -1927,7 +1927,7 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _('Unable to locate disk matching id: %s') % volume_id
raise exception.NovaException(msg)
LOG.debug(_("found dev, it's %(dev)s, with active disk: %(disk)s"),
LOG.debug("found dev, it's %(dev)s, with active disk: %(disk)s",
{'dev': my_dev, 'disk': active_disk})
if delete_info['merge_target_file'] is None:
@ -1940,22 +1940,22 @@ class LibvirtDriver(driver.ComputeDriver):
rebase_bw = 0
rebase_flags = 0
LOG.debug(_('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s') %
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
LOG.debug('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug(_('blockRebase started successfully'))
LOG.debug('blockRebase started successfully')
while self._wait_for_block_job(virt_dom, rebase_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockRebase job completion'))
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
@ -1971,11 +1971,11 @@ class LibvirtDriver(driver.ComputeDriver):
bandwidth, flags)
if result == 0:
LOG.debug(_('blockCommit started successfully'))
LOG.debug('blockCommit started successfully')
while self._wait_for_block_job(virt_dom, commit_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockCommit job completion'))
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
@ -2002,7 +2002,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug(_("Instance soft reboot failed: %s"), e)
LOG.debug("Instance soft reboot failed: %s", e)
soft_reboot_success = False
if soft_reboot_success:
@ -2286,7 +2286,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
@ -2424,7 +2424,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug(_("This python runtime does not support direct I/O"))
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
@ -2438,12 +2438,12 @@ class LibvirtDriver(driver.ComputeDriver):
m.write(r"x" * align_size)
os.write(f, m)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
@ -2854,12 +2854,12 @@ class LibvirtDriver(driver.ComputeDriver):
DISABLE_PREFIX + disable_reason
if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug(_('Updating compute service status to %s'),
status_name[disable_service])
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug(_('Not overriding manual compute service '
'status with: %s'),
status_name[disable_service])
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_('Cannot update service status on host: %s,'
' since it is not registered.') % CONF.host)
@ -2957,8 +2957,8 @@ class LibvirtDriver(driver.ComputeDriver):
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
@ -3396,8 +3396,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Enable qga only if the 'hw_qemu_guest_agent' is equal to yes
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if hw_qga.lower() == 'yes':
LOG.debug(_("Qemu guest agent is enabled through image "
"metadata"), instance=instance)
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
qga_enabled = True
if qga_enabled:
@ -3481,7 +3481,7 @@ class LibvirtDriver(driver.ComputeDriver):
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml xml=%(xml)s'),
LOG.debug('End to_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
@ -4116,8 +4116,8 @@ class LibvirtDriver(driver.ComputeDriver):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug(_("Trying to get stats for the volume %s"),
volume_id)
LOG.debug("Trying to get stats for the volume %s",
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
@ -4129,9 +4129,9 @@ class LibvirtDriver(driver.ComputeDriver):
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
_("Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d"),
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
@ -4202,9 +4202,9 @@ class LibvirtDriver(driver.ComputeDriver):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage."),
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
@ -4394,9 +4394,9 @@ class LibvirtDriver(driver.ComputeDriver):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
@ -4792,17 +4792,17 @@ class LibvirtDriver(driver.ComputeDriver):
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug(_('skipping disk for %s as it does not have a path'),
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type != 'file':
LOG.debug(_('skipping %s since it looks like volume'), path)
LOG.debug('skipping %s since it looks like volume', path)
continue
if target in volume_devices:
LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
'volume'), {'path': path, 'target': target})
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
@ -4923,8 +4923,8 @@ class LibvirtDriver(driver.ComputeDriver):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
# Checks if the migration needs a disk resize down.
for kind in ('root_gb', 'ephemeral_gb'):
@ -5068,7 +5068,7 @@ class LibvirtDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_migration"), instance=instance)
LOG.debug("Starting finish_migration", instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
@ -5108,8 +5108,8 @@ class LibvirtDriver(driver.ComputeDriver):
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
@ -5216,7 +5216,7 @@ class LibvirtDriver(driver.ComputeDriver):
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessibility %s'), instance_path)
LOG.debug('Checking instance files accessibility %s', instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
@ -5320,7 +5320,7 @@ class HostState(object):
available_least = disk_free_gb * units.Gi - disk_over_committed
return (available_least / units.Gi)
LOG.debug(_("Updating host stats"))
LOG.debug("Updating host stats")
disk_info_dict = self.driver.get_local_gb_info()
data = {}

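Among the hunks above, _supports_direct_io is the most self-contained piece of logic; a condensed, hedged version of the probe (a sketch under the same assumptions, not the driver's exact code) looks like this:

    import errno
    import mmap
    import os

    def supports_direct_io(dirpath, align=4096):
        # Probe whether the filesystem backing dirpath accepts O_DIRECT
        # writes; EINVAL on open or on the aligned write means it does not.
        if not hasattr(os, 'O_DIRECT'):
            return False
        testfile = os.path.join(dirpath, ".directio.test")
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            try:
                buf = mmap.mmap(-1, align)  # anonymous mmap is page-aligned
                buf.write(b"x" * align)
                os.write(f, buf)
            finally:
                os.close(f)
            return True
        except OSError as e:
            if e.errno == errno.EINVAL:
                return False
            raise
        finally:
            if os.path.exists(testfile):
                os.unlink(testfile)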
View File

@ -250,7 +250,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
# This happens when the instance filter is still in
# use (ie. when the instance has not terminated properly)
raise
LOG.debug(_('The nwfilter(%s) is not found.'),
LOG.debug('The nwfilter(%s) is not found.',
instance_filter_name, instance=instance)
@staticmethod
@ -268,8 +268,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance['name']
LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
'%(name)s is not found.'),
LOG.debug('The nwfilter(%(instance_filter_name)s) for '
'%(name)s is not found.',
{'instance_filter_name': instance_filter_name,
'name': name},
instance=instance)
@ -286,7 +286,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
"""Set up provider rules and basic NWFilter."""
self.nwfilter.setup_basic_filtering(instance, network_info)
if not self.basically_filtered:
LOG.debug(_('iptables firewall: Setup Basic Filtering'),
LOG.debug('iptables firewall: Setup Basic Filtering',
instance=instance)
self.refresh_provider_fw_rules()
self.basically_filtered = True

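Note that instance=instance in these calls is not a format argument: nova's logger is a context adapter that pulls it out of the kwargs and folds the uuid into the line. A rough stand-in (the real adapter in nova.openstack.common.log is more elaborate):

    import logging

    class InstanceAdapter(logging.LoggerAdapter):
        def process(self, msg, kwargs):
            # Remove 'instance' before it reaches %-formatting and
            # prepend its uuid to the message instead.
            instance = kwargs.pop('instance', None)
            if instance is not None:
                msg = '[instance: %s] %s' % (instance.get('uuid'), msg)
            return msg, kwargs

    logging.basicConfig(level=logging.DEBUG)
    LOG = InstanceAdapter(logging.getLogger(__name__), {})
    LOG.debug("Destroying instance", instance={'uuid': 'fake-uuid'})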
View File

@ -169,7 +169,7 @@ def read_stored_info(target, field=None, timestamped=False):
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def read_file(info_file):
LOG.debug(_('Reading image info file: %s'), info_file)
LOG.debug('Reading image info file: %s', info_file)
with open(info_file, 'r') as f:
return f.read().rstrip()
@ -289,23 +289,23 @@ class ImageCacheManager(imagecache.ImageCacheManager):
inuse_images = []
for ent in os.listdir(CONF.instances_path):
if ent in self.instance_names:
LOG.debug(_('%s is a valid instance name'), ent)
LOG.debug('%s is a valid instance name', ent)
disk_path = os.path.join(CONF.instances_path, ent, 'disk')
if os.path.exists(disk_path):
LOG.debug(_('%s has a disk file'), ent)
LOG.debug('%s has a disk file', ent)
try:
backing_file = virtutils.get_disk_backing_file(
disk_path)
except processutils.ProcessExecutionError:
# (for bug 1261442)
if not os.path.exists(disk_path):
LOG.debug(_('Failed to get disk backing file: %s'),
LOG.debug('Failed to get disk backing file: %s',
disk_path)
continue
else:
raise
LOG.debug(_('Instance %(instance)s is backed by '
'%(backing)s'),
LOG.debug('Instance %(instance)s is backed by '
'%(backing)s',
{'instance': ent,
'backing': backing_file})
@ -425,8 +425,8 @@ class ImageCacheManager(imagecache.ImageCacheManager):
Returns nothing.
"""
if not os.path.exists(base_file):
LOG.debug(_('Cannot remove %s, it does not exist'),
base_file)
LOG.debug('Cannot remove %s, it does not exist',
base_file)
return
mtime = os.path.getmtime(base_file)
@ -506,15 +506,15 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if base_file:
if not image_in_use:
LOG.debug(_('image %(id)s at (%(base_file)s): image is not in '
'use'),
LOG.debug('image %(id)s at (%(base_file)s): image is not in '
'use',
{'id': img_id,
'base_file': base_file})
self.removable_base_files.append(base_file)
else:
LOG.debug(_('image %(id)s at (%(base_file)s): image is in '
'use'),
LOG.debug('image %(id)s at (%(base_file)s): image is in '
'use',
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
@ -522,11 +522,11 @@ class ImageCacheManager(imagecache.ImageCacheManager):
os.utime(base_file, None)
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
LOG.debug(_('Verify base images'))
LOG.debug('Verify base images')
# Determine what images are on disk because they're in use
for img in self.used_images:
fingerprint = hashlib.sha1(img).hexdigest()
LOG.debug(_('Image id %(id)s yields fingerprint %(fingerprint)s'),
LOG.debug('Image id %(id)s yields fingerprint %(fingerprint)s',
{'id': img,
'fingerprint': fingerprint})
for result in self._find_base_file(base_dir, fingerprint):
@ -564,7 +564,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
self._remove_base_file(base_file)
# That's it
LOG.debug(_('Verification complete'))
LOG.debug('Verification complete')
def _get_base(self):
@ -581,7 +581,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
if not os.path.exists(base_dir):
LOG.debug(_('Skipping verification, no base directory at %s'),
LOG.debug('Skipping verification, no base directory at %s',
base_dir)
return
return base_dir

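The fingerprint in the verification pass above is simply the SHA-1 hex digest of the glance image id, which is also how the cached base files are named on disk. For example (a made-up uuid):

    import hashlib

    image_id = "17d1b00b-1d4f-4c14-8b75-1f0e4b8e0b35"
    print(hashlib.sha1(image_id.encode('utf-8')).hexdigest())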
View File

@ -343,8 +343,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
def get_config(self, instance, vif, image_meta, inst_type):
vif_type = vif['type']
LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s'),
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
@ -406,7 +406,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
if network.get_meta('should_create_vlan', False):
iface = CONF.vlan_interface or \
network.get_meta('bridge_interface')
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
LOG.debug('Ensuring vlan %(vlan)s and bridge %(bridge)s',
{'vlan': network.get_meta('vlan'),
'bridge': self.get_bridge_name(vif)},
instance=instance)
@ -417,7 +417,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
else:
iface = CONF.flat_interface or \
network.get_meta('bridge_interface')
LOG.debug(_("Ensuring bridge %s"),
LOG.debug("Ensuring bridge %s",
self.get_bridge_name(vif), instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(vif),
@ -598,8 +598,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s'),
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
@ -780,8 +780,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s'),
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})

View File

@ -244,7 +244,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug(_("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s"),
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
@ -317,8 +317,8 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)"),
LOG.debug("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)",
{'disk_dev': disk_dev,
'tries': tries})
@ -577,7 +577,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug(_("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s"),
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
@ -587,7 +587,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
*multipath_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug(_("multipath %(command)s: stdout=%(out)s stderr=%(err)s"),
LOG.debug("multipath %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': multipath_command, 'out': out, 'err': err})
return (out, err)
@ -675,7 +675,7 @@ class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.message:
LOG.debug(_("The NFS share %s is still in use."), export)
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_("Couldn't unmount the NFS share %s"), export)
@ -765,8 +765,8 @@ class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
tries = self.tries
if tries != 0:
LOG.debug(_("Found AoE device %(aoedevpath)s "
"(after %(tries)s rediscover)"),
LOG.debug("Found AoE device %(aoedevpath)s "
"(after %(tries)s rediscover)",
{'aoedevpath': aoedevpath,
'tries': tries})
@ -825,7 +825,7 @@ class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.message:
LOG.debug(_("The GlusterFS share %s is still in use."), export)
LOG.debug("The GlusterFS share %s is still in use.", export)
else:
LOG.exception(_("Couldn't unmount the GlusterFS share %s"),
export)
@ -930,7 +930,7 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
def _wait_for_device_discovery(host_devices, mount_device):
tries = self.tries
for device in host_devices:
LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
LOG.debug("Looking for Fibre Channel dev %(device)s",
{'device': device})
if os.path.exists(device):
self.host_device = device
@ -960,8 +960,8 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug(_("Found Fibre Channel volume %(mount_device)s "
"(after %(tries)s rescans)"),
LOG.debug("Found Fibre Channel volume %(mount_device)s "
"(after %(tries)s rescans)",
{'mount_device': mount_device,
'tries': tries})
@ -969,8 +969,8 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
# device. If so, we'll use the multipath device.
mdev_info = linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug(_("Multipath device discovered %(device)s")
% {'device': mdev_info['device']})
LOG.debug("Multipath device discovered %(device)s",
{'device': mdev_info['device']})
device_path = mdev_info['device']
connection_info['data']['devices'] = mdev_info['devices']
connection_info['data']['multipath_id'] = mdev_info['id']
@ -1002,7 +1002,7 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
multipath_id = connection_info['data']['multipath_id']
mdev_info = linuxscsi.find_multipath_device(multipath_id)
devices = mdev_info['devices']
LOG.debug(_("devices to remove = %s"), devices)
LOG.debug("devices to remove = %s", devices)
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove

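Several of the drivers above rescan and then wait with a quadratic backoff (time.sleep(tries ** 2), i.e. 0s, 1s, 4s, 9s, ...). Reduced to its essentials, and offered as a sketch rather than the drivers' exact control flow:

    import os
    import time

    def wait_for_device(path, rescan, max_tries=5):
        for tries in range(max_tries):
            if os.path.exists(path):
                return tries  # how many rescans it took
            rescan()
            time.sleep(tries ** 2)
        raise RuntimeError("device %s never appeared" % path)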
View File

@ -160,7 +160,7 @@ class VMwareESXDriver(driver.ComputeDriver):
try:
vim.client.service.Logout(session_manager)
except suds.WebFault:
LOG.debug(_("No vSphere session was open during cleanup_host."))
LOG.debug("No vSphere session was open during cleanup_host.")
def list_instances(self):
"""List VM instances."""
@ -609,7 +609,7 @@ class VMwareVCDriver(VMwareESXDriver):
nodename = self._create_nodename(node,
self.dict_mors.get(node)['name'])
node_list.append(nodename)
LOG.debug(_("The available nodes are: %s") % node_list)
LOG.debug("The available nodes are: %s", node_list)
return node_list
def get_host_stats(self, refresh=True):
@ -864,7 +864,7 @@ class VMwareAPISession(object):
except Exception as e:
LOG.warning(_("Unable to validate session %s!"),
self._session.key)
LOG.debug(_("Exception: %(ex)s"), {'ex': e})
LOG.debug("Exception: %(ex)s", {'ex': e})
return active
def _call_method(self, module, method, *args, **kwargs):
@ -930,13 +930,13 @@ class VMwareAPISession(object):
exc = excep
break
LOG.debug(_("_call_method(session=%(key)s) failed. "
"Module: %(module)s. "
"Method: %(method)s. "
"args: %(args)s. "
"kwargs: %(kwargs)s. "
"Iteration: %(n)s. "
"Exception: %(ex)s. "),
LOG.debug("_call_method(session=%(key)s) failed. "
"Module: %(module)s. "
"Method: %(method)s. "
"args: %(args)s. "
"kwargs: %(kwargs)s. "
"Iteration: %(n)s. "
"Exception: %(ex)s. ",
{'key': self._session.key,
'module': module,
'method': method,
@ -989,8 +989,8 @@ class VMwareAPISession(object):
if task_info.state in ['queued', 'running']:
return
elif task_info.state == 'success':
LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
"status: success"),
LOG.debug("Task [%(task_name)s] %(task_ref)s "
"status: success",
{'task_name': task_name, 'task_ref': task_ref})
done.send(task_info)
else:

View File

@ -16,7 +16,6 @@
Datastore utility functions
"""
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vm_util
@ -45,7 +44,7 @@ def split_datastore_path(datastore_path):
def file_delete(session, datastore_path, dc_ref):
LOG.debug(_("Deleting the datastore file %s"), datastore_path)
LOG.debug("Deleting the datastore file %s", datastore_path)
vim = session._get_vim()
file_delete_task = session._call_method(
session._get_vim(),
@ -54,7 +53,7 @@ def file_delete(session, datastore_path, dc_ref):
name=datastore_path,
datacenter=dc_ref)
session._wait_for_task(file_delete_task)
LOG.debug(_("Deleted the datastore file"))
LOG.debug("Deleted the datastore file")
def file_move(session, dc_ref, src_file, dst_file):
@ -79,7 +78,7 @@ def file_move(session, dc_ref, src_file, dst_file):
is not covered by the other faults; for example,
a communication error.
"""
LOG.debug(_("Moving file from %(src)s to %(dst)s."),
LOG.debug("Moving file from %(src)s to %(dst)s.",
{'src': src_file, 'dst': dst_file})
vim = session._get_vim()
move_task = session._call_method(
@ -91,7 +90,7 @@ def file_move(session, dc_ref, src_file, dst_file):
destinationName=dst_file,
destinationDatacenter=dc_ref)
session._wait_for_task(move_task)
LOG.debug(_("File moved"))
LOG.debug("File moved")
def file_exists(session, ds_browser, ds_path, file_name):
@ -118,12 +117,12 @@ def mkdir(session, ds_path, dc_ref):
then a directory with this name is created at the topmost level of the
DataStore.
"""
LOG.debug(_("Creating directory with path %s"), ds_path)
LOG.debug("Creating directory with path %s", ds_path)
session._call_method(session._get_vim(), "MakeDirectory",
session._get_vim().get_service_content().fileManager,
name=ds_path, datacenter=dc_ref,
createParentDirectories=True)
LOG.debug(_("Created directory with path %s"), ds_path)
LOG.debug("Created directory with path %s", ds_path)
def get_sub_folders(session, ds_browser, ds_path):

View File

@ -43,7 +43,7 @@ LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
"""Log DB Contents."""
LOG.debug(_("%(text)s: _db_content => %(content)s"),
LOG.debug("%(text)s: _db_content => %(content)s",
{'text': msg or "", 'content': pprint.pformat(_db_content)})
@ -1064,7 +1064,7 @@ class FakeVim(object):
"""Checks if the session is active."""
if (self._session is None or self._session not in
_db_content['session']):
LOG.debug(_("Session is faulty"))
LOG.debug("Session is faulty")
raise error_util.VimFaultException(
[error_util.NOT_AUTHENTICATED],
_("Session Invalid"))

View File

@ -18,7 +18,6 @@ Management class for host-related functions (start, reboot, etc).
"""
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
@ -36,7 +35,7 @@ class Host(object):
def host_power_action(self, host, action):
"""Reboots or shuts down the host."""
host_mor = vm_util.get_host_ref(self._session)
LOG.debug(_("%(action)s %(host)s"), {'action': action, 'host': host})
LOG.debug("%(action)s %(host)s", {'action': action, 'host': host})
if action == "reboot":
host_task = self._session._call_method(
self._session._get_vim(),
@ -59,7 +58,7 @@ class Host(object):
guest VMs evacuation.
"""
host_mor = vm_util.get_host_ref(self._session)
LOG.debug(_("Set maintenance mod on %(host)s to %(mode)s"),
LOG.debug("Set maintenance mod on %(host)s to %(mode)s",
{'host': host, 'mode': mode})
if mode:
host_task = self._session._call_method(

View File

@ -74,7 +74,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
LOG.warning(_("Unable to delete %(file)s. Exception: %(ex)s"),
{'file': path, 'ex': e})
except error_util.FileNotFoundException:
LOG.debug(_("File not found: %s"), path)
LOG.debug("File not found: %s", path)
def timestamp_folder_get(self, ds_path, image_id):
"""Returns the timestamp folder."""
@ -85,7 +85,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
ts = self._get_timestamp(ds_browser, ds_path)
if ts:
ts_path = '%s/%s' % (ds_path, ts)
LOG.debug(_("Timestamp path %s exists. Deleting!"), ts_path)
LOG.debug("Timestamp path %s exists. Deleting!", ts_path)
# Image is used - no longer need timestamp folder
self._folder_delete(ts_path, dc_ref)
@ -145,7 +145,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
try:
ds_util.mkdir(self._session, ts_path, dc_info.ref)
except error_util.FileAlreadyExistsException:
LOG.debug(_("Timestamp already exists."))
LOG.debug("Timestamp already exists.")
LOG.info(_("Image %s is no longer used by this node. "
"Pending deletion!"), image)
else:

View File

@ -47,11 +47,11 @@ def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
# in the parent property field rather than a [] in the
# ManagedObjectReference property field of the parent
if not vm_networks_ret:
LOG.debug(_("No networks configured on host!"))
LOG.debug("No networks configured on host!")
return
vm_networks = vm_networks_ret.ManagedObjectReference
network_obj = {}
LOG.debug(_("Configured networks: %s"), vm_networks)
LOG.debug("Configured networks: %s", vm_networks)
for network in vm_networks:
# Get network properties
if network._type == 'DistributedVirtualPortgroup':
@ -77,7 +77,7 @@ def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
network_obj['name'] = network_name
if (len(network_obj) > 0):
return network_obj
LOG.debug(_("Network %s not found on host!"), network_name)
LOG.debug("Network %s not found on host!", network_name)
def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
@ -154,8 +154,8 @@ def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
network_system_mor = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "configManager.networkSystem")
LOG.debug(_("Creating Port Group with name %s on "
"the ESX host") % pg_name)
LOG.debug("Creating Port Group with name %s on "
"the ESX host", pg_name)
try:
session._call_method(session._get_vim(),
"AddPortGroup", network_system_mor,
@ -166,6 +166,6 @@ def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
# the other one will get an exception. Since we are
# concerned with the port group being created, which is done
# by the other call, we can ignore the exception.
LOG.debug(_("Port Group %s already exists."), pg_name)
LOG.debug(_("Created Port Group with name %s on "
"the ESX host") % pg_name)
LOG.debug("Port Group %s already exists.", pg_name)
LOG.debug("Created Port Group with name %s on "
"the ESX host", pg_name)

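The AddPortGroup race described in the comment above is the usual idempotent-create pattern: the loser of the race can swallow the failure because the winner produced exactly the state both wanted. Schematically (the exception name is invented for the sketch):

    class AlreadyExists(Exception):
        pass

    def ensure(create, name):
        try:
            create(name)
        except AlreadyExists:
            # A concurrent caller won the race; the end state is the
            # same either way.
            pass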
View File

@ -26,7 +26,6 @@ import urllib2
import six.moves.urllib.parse as urlparse
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
@ -142,8 +141,8 @@ class VMwareHTTPWriteFile(VMwareHTTPFile):
try:
self.conn.getresponse()
except Exception as excep:
LOG.debug(_("Exception during HTTP connection close in "
"VMwareHTTPWrite. Exception is %s") % excep)
LOG.debug("Exception during HTTP connection close in "
"VMwareHTTPWrite. Exception is %s", excep)
super(VMwareHTTPWriteFile, self).close()

View File

@ -1382,13 +1382,13 @@ def get_vmdk_adapter_type(adapter_type):
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
"""Create VM on ESX host."""
LOG.debug(_("Creating VM on the ESX host"), instance=instance)
LOG.debug("Creating VM on the ESX host", instance=instance)
vm_create_task = session._call_method(
session._get_vim(),
"CreateVM_Task", vm_folder,
config=config_spec, pool=res_pool_ref)
task_info = session._wait_for_task(vm_create_task)
LOG.debug(_("Created VM on the ESX host"), instance=instance)
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
@ -1473,15 +1473,15 @@ def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)
# Clone VM on ESX host
LOG.debug(_("Cloning VM for instance %s"), instance['uuid'],
instance=instance)
LOG.debug("Cloning VM for instance %s", instance['uuid'],
instance=instance)
vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task",
vm_ref, folder=vmfolder_ref,
name=instance['uuid'],
spec=clone_spec)
session._wait_for_task(vm_clone_task)
LOG.debug(_("Cloned VM for instance %s"), instance['uuid'],
instance=instance)
LOG.debug("Cloned VM for instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
@ -1503,13 +1503,13 @@ def disassociate_vmref_from_instance(session, instance, vm_ref=None,
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid'] + suffix
reconfig_spec.instanceUuid = ''
LOG.debug(_("Disassociating VM from instance %s"), instance['uuid'],
instance=instance)
LOG.debug("Disassociating VM from instance %s", instance['uuid'],
instance=instance)
reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task",
vm_ref, spec=reconfig_spec)
session._wait_for_task(reconfig_task)
LOG.debug(_("Disassociated VM from instance %s"), instance['uuid'],
instance=instance)
LOG.debug("Disassociated VM from instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
@ -1535,13 +1535,13 @@ def associate_vmref_for_instance(session, instance, vm_ref=None,
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid']
reconfig_spec.instanceUuid = instance['uuid']
LOG.debug(_("Associating VM to instance %s"), instance['uuid'],
instance=instance)
LOG.debug("Associating VM to instance %s", instance['uuid'],
instance=instance)
reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task",
vm_ref, spec=reconfig_spec)
session._wait_for_task(reconfig_task)
LOG.debug(_("Associated VM to instance %s"), instance['uuid'],
instance=instance)
LOG.debug("Associated VM to instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])

View File

@ -104,18 +104,18 @@ class VMwareVMOps(object):
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
LOG.debug(_("Getting list of instances"))
LOG.debug("Getting list of instances")
vms = self._session._call_method(vim_util, "get_objects",
"VirtualMachine",
["name", "runtime.connectionState"])
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session._get_vim().get_service_content()
LOG.debug(_("Extending root virtual disk to %s"), requested_size)
LOG.debug("Extending root virtual disk to %s", requested_size)
vmdk_extend_task = self._session._call_method(
self._session._get_vim(),
"ExtendVirtualDisk_Task",
@ -135,7 +135,7 @@ class VMwareVMOps(object):
for file in files:
self._delete_datastore_file(instance, file, dc_ref)
LOG.debug(_("Extended root virtual disk"))
LOG.debug("Extended root virtual disk")
def _delete_datastore_file(self, instance, datastore_path, dc_ref):
try:
@ -144,9 +144,9 @@ class VMwareVMOps(object):
error_util.FileFaultException,
error_util.FileLockedException,
error_util.FileNotFoundException) as e:
LOG.debug(_("Unable to delete %(ds)s. There may be more than "
"one process or thread that tries to delete the file. "
"Exception: %(ex)s"),
LOG.debug("Unable to delete %(ds)s. There may be more than "
"one process or thread that tries to delete the file. "
"Exception: %(ex)s",
{'ds': datastore_path, 'ex': e})
def _get_vmdk_path(self, ds_name, folder, name):
@ -406,7 +406,7 @@ class VMwareVMOps(object):
# process or thread has already completed the operation.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.debug(_("File %s already exists"), dest_folder)
LOG.debug("File %s already exists", dest_folder)
# Delete the temp upload folder
self._delete_datastore_file(instance,
@ -423,7 +423,7 @@ class VMwareVMOps(object):
dest_vmdk_path = self._get_vmdk_path(data_store_name,
instance['uuid'], instance_name)
# Create the blank virtual disk for the VM
LOG.debug(_("Create blank virtual disk on %s"),
LOG.debug("Create blank virtual disk on %s",
data_store_name, instance=instance)
vm_util.create_virtual_disk(self._session,
dc_info.ref,
@ -431,7 +431,7 @@ class VMwareVMOps(object):
disk_type,
dest_vmdk_path,
root_gb_in_kb)
LOG.debug(_("Blank virtual disk created on %s."),
LOG.debug("Blank virtual disk created on %s.",
data_store_name, instance=instance)
root_vmdk_path = dest_vmdk_path
else:
@ -467,7 +467,7 @@ class VMwareVMOps(object):
data_store_ref, data_store_name,
upload_folder,
upload_name + ".%s.vmdk" % root_gb):
LOG.debug(_("Copying root disk of size %sGb"), root_gb)
LOG.debug("Copying root disk of size %sGb", root_gb)
try:
copy_spec = self.get_copy_virtual_disk_spec(
client_factory, adapter_type, disk_type)
@ -577,16 +577,16 @@ class VMwareVMOps(object):
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
"cdrom %(file_path)s"),
LOG.debug("Reconfiguring VM instance %(instance_name)s to attach "
"cdrom %(file_path)s",
{'instance_name': instance_name, 'file_path': file_path})
reconfig_task = self._session._call_method(
self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=cdrom_attach_config_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
"cdrom %(file_path)s"),
LOG.debug("Reconfigured VM instance %(instance_name)s to attach "
"cdrom %(file_path)s",
{'instance_name': instance_name, 'file_path': file_path})
@staticmethod
@ -642,7 +642,7 @@ class VMwareVMOps(object):
disk_type)
def _create_vm_snapshot(self, instance, vm_ref):
LOG.debug(_("Creating Snapshot of the VM instance"), instance=instance)
LOG.debug("Creating Snapshot of the VM instance", instance=instance)
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
@ -651,7 +651,7 @@ class VMwareVMOps(object):
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug(_("Created Snapshot of the VM instance"), instance=instance)
LOG.debug("Created Snapshot of the VM instance", instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
@ -659,13 +659,13 @@ class VMwareVMOps(object):
return snapshot
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug(_("Deleting Snapshot of the VM instance"), instance=instance)
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
self._session._get_vim(),
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug(_("Deleted Snapshot of the VM instance"), instance=instance)
LOG.debug("Deleted Snapshot of the VM instance", instance=instance)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
@ -741,7 +741,7 @@ class VMwareVMOps(object):
copy_spec = self.get_copy_virtual_disk_spec(client_factory,
adapter_type,
disk_type)
LOG.debug(_('Copying snapshotted disk %s.'),
LOG.debug('Copying snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
copy_disk_task = self._session._call_method(
@ -755,7 +755,7 @@ class VMwareVMOps(object):
destSpec=copy_spec,
force=False)
self._session._wait_for_task(copy_disk_task)
LOG.debug(_('Copied snapshotted disk %s.'),
LOG.debug('Copied snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
@ -767,7 +767,7 @@ class VMwareVMOps(object):
def _upload_vmdk_to_image_repository():
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug(_("Uploading image %s") % image_id,
LOG.debug("Uploading image %s", image_id,
instance=instance)
vmware_images.upload_image(
context,
@ -782,7 +782,7 @@ class VMwareVMOps(object):
datastore_name=datastore_name,
cookies=cookies,
file_path="%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
LOG.debug(_("Uploaded image %s") % image_id,
LOG.debug("Uploaded image %s", image_id,
instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
@ -826,16 +826,16 @@ class VMwareVMOps(object):
# are running, then only do a guest reboot. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning"):
LOG.debug(_("Rebooting guest OS of VM"), instance=instance)
LOG.debug("Rebooting guest OS of VM", instance=instance)
self._session._call_method(self._session._get_vim(), "RebootGuest",
vm_ref)
LOG.debug(_("Rebooted guest OS of VM"), instance=instance)
LOG.debug("Rebooted guest OS of VM", instance=instance)
else:
LOG.debug(_("Doing hard reboot of VM"), instance=instance)
LOG.debug("Doing hard reboot of VM", instance=instance)
reset_task = self._session._call_method(self._session._get_vim(),
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
LOG.debug("Did hard reboot of VM", instance=instance)
def _destroy_instance(self, instance, network_info, destroy_disks=True,
instance_name=None):
@ -861,19 +861,19 @@ class VMwareVMOps(object):
datastore_name, vmx_file_path = _ds_path
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
LOG.debug(_("Powering off the VM"), instance=instance)
LOG.debug("Powering off the VM", instance=instance)
poweroff_task = self._session._call_method(
self._session._get_vim(),
"PowerOffVM_Task", vm_ref)
self._session._wait_for_task(poweroff_task)
LOG.debug(_("Powered off the VM"), instance=instance)
LOG.debug("Powered off the VM", instance=instance)
# Un-register the VM
try:
LOG.debug(_("Unregistering the VM"), instance=instance)
LOG.debug("Unregistering the VM", instance=instance)
self._session._call_method(self._session._get_vim(),
"UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
"exception while un-registering the VM: %s"),
@ -885,9 +885,9 @@ class VMwareVMOps(object):
dir_ds_compliant_path = ds_util.build_datastore_path(
datastore_name,
os.path.dirname(vmx_file_path))
LOG.debug(_("Deleting contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': datastore_name},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
@ -895,9 +895,9 @@ class VMwareVMOps(object):
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
LOG.debug("Deleted contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': datastore_name},
instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
@ -918,12 +918,12 @@ class VMwareVMOps(object):
3. Delete the contents of the folder holding the VM related data.
"""
# If there is a rescue VM then we need to destroy that one too.
LOG.debug(_("Destroying instance"), instance=instance)
LOG.debug("Destroying instance", instance=instance)
if instance['vm_state'] == vm_states.RESCUED:
LOG.debug(_("Rescue VM configured"), instance=instance)
LOG.debug("Rescue VM configured", instance=instance)
try:
self.unrescue(instance, power_on=False)
LOG.debug(_("Rescue VM destroyed"), instance=instance)
LOG.debug("Rescue VM destroyed", instance=instance)
except Exception:
rescue_name = instance['uuid'] + self._rescue_suffix
self._destroy_instance(instance, network_info,
@ -931,7 +931,7 @@ class VMwareVMOps(object):
instance_name=rescue_name)
self._destroy_instance(instance, network_info,
destroy_disks=destroy_disks)
LOG.debug(_("Instance destroyed"), instance=instance)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
@ -949,18 +949,18 @@ class VMwareVMOps(object):
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug(_("Suspending the VM"), instance=instance)
LOG.debug("Suspending the VM", instance=instance)
suspend_task = self._session._call_method(self._session._get_vim(),
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug(_("Suspended the VM"), instance=instance)
LOG.debug("Suspended the VM", instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug(_("VM was already in suspended state. So returning "
"without doing anything"), instance=instance)
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
@ -969,12 +969,12 @@ class VMwareVMOps(object):
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug(_("Resuming the VM"), instance=instance)
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug(_("Resumed the VM"), instance=instance)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
@ -1059,16 +1059,16 @@ class VMwareVMOps(object):
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be powered off.
if pwr_state == "poweredOn":
LOG.debug(_("Powering off the VM"), instance=instance)
LOG.debug("Powering off the VM", instance=instance)
self._power_off_vm_ref(vm_ref)
LOG.debug(_("Powered off the VM"), instance=instance)
LOG.debug("Powered off the VM", instance=instance)
# Raise Exception if VM is suspended
elif pwr_state == "suspended":
reason = _("instance is suspended and cannot be powered off.")
raise exception.InstancePowerOffFailure(reason=reason)
else:
LOG.debug(_("VM was already in powered off state. So returning "
"without doing anything"), instance=instance)
LOG.debug("VM was already in powered off state. So returning "
"without doing anything", instance=instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
@ -1088,8 +1088,8 @@ class VMwareVMOps(object):
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug(_("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d"),
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
@ -1144,16 +1144,16 @@ class VMwareVMOps(object):
vm_ref = vm_util.search_vm_ref_by_identifier(self._session,
instance['uuid'] + self._migrate_suffix)
if vm_ref is None:
LOG.debug(_("instance not present"), instance=instance)
LOG.debug("instance not present", instance=instance)
return
try:
LOG.debug(_("Destroying the VM"), instance=instance)
LOG.debug("Destroying the VM", instance=instance)
destroy_task = self._session._call_method(
self._session._get_vim(),
"Destroy_Task", vm_ref)
self._session._wait_for_task(destroy_task)
LOG.debug(_("Destroyed the VM"), instance=instance)
LOG.debug("Destroyed the VM", instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s") % str(excep))
@ -1199,7 +1199,7 @@ class VMwareVMOps(object):
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug(_("Migrating VM to host %s") % dest, instance=instance_ref)
LOG.debug("Migrating VM to host %s", dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session._get_vim(),
@ -1211,7 +1211,7 @@ class VMwareVMOps(object):
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug(_("Migrated VM to host %s") % dest, instance=instance_ref)
LOG.debug("Migrated VM to host %s", dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
@ -1326,13 +1326,13 @@ class VMwareVMOps(object):
client_factory,
self._get_machine_id_str(network_info))
LOG.debug(_("Reconfiguring VM instance to set the machine id"),
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=machine_id_change_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance to set the machine id"),
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
def _set_vnc_config(self, client_factory, instance, port):
@ -1342,15 +1342,15 @@ class VMwareVMOps(object):
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug(_("Reconfiguring VM instance to enable vnc on "
"port - %(port)s") % {'port': port},
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=vnc_config_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance to enable vnc on "
"port - %(port)s") % {'port': port},
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
@ -1412,7 +1412,7 @@ class VMwareVMOps(object):
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug(_("Folder %s created."), path)
LOG.debug("Folder %s created.", path)
except error_util.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
@ -1445,7 +1445,7 @@ class VMwareVMOps(object):
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug(_("Image aging disabled. Aging will not be done."))
LOG.debug("Image aging disabled. Aging will not be done.")
return
datastores = vm_util.get_available_datastores(self._session,
@ -1492,10 +1492,10 @@ class VMwareVCVMOps(VMwareVMOps):
def get_copy_virtual_disk_spec(self, client_factory, adapter_type,
disk_type):
LOG.debug(_("Will copy while retaining adapter type "
"%(adapter_type)s and disk type %(disk_type)s") %
{"disk_type": disk_type,
"adapter_type": adapter_type})
LOG.debug("Will copy while retaining adapter type "
"%(adapter_type)s and disk type %(disk_type)s",
{"disk_type": disk_type,
"adapter_type": adapter_type})
# Passing of the destination copy spec is not supported when
# VirtualDiskManager.CopyVirtualDisk is called on VC. The behavior of a
# spec-less copy is to consolidate to the target disk while keeping its
@ -1544,7 +1544,7 @@ class VMwareVCVMOps(VMwareVMOps):
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug(_("Getting list of instances from cluster %s"),
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
root_res_pool = self._session._call_method(
@ -1556,7 +1556,7 @@ class VMwareVCVMOps(VMwareVMOps):
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
@ -1574,7 +1574,7 @@ class VMwareVCVMOps(VMwareVMOps):
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug(_("VM %(uuid)s is currently on host %(host_name)s"),
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
{'uuid': instance['name'], 'host_name': host_name},
instance=instance)
return vnc_console

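The progress figure logged during the migrate steps above is a plain step ratio, rounded to a whole percentage:

    def progress_percent(step, total_steps):
        # Mirrors progress = round(float(step) / total_steps * 100).
        return int(round(float(step) / total_steps * 100))

    print(progress_percent(4, 8))  # -> 50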
View File

@ -21,7 +21,6 @@ import os
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
@ -89,7 +88,7 @@ def start_transfer(context, read_file_handle, data_size,
def upload_iso_to_datastore(iso_path, instance, **kwargs):
LOG.debug(_("Uploading iso %s to datastore") % iso_path,
LOG.debug("Uploading iso %s to datastore", iso_path,
instance=instance)
with open(iso_path, 'r') as iso_file:
write_file_handle = read_write_util.VMwareHTTPWriteFile(
@ -100,7 +99,7 @@ def upload_iso_to_datastore(iso_path, instance, **kwargs):
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug(_("Uploading iso of size : %s ") %
LOG.debug("Uploading iso of size : %s ",
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
@ -109,7 +108,7 @@ def upload_iso_to_datastore(iso_path, instance, **kwargs):
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug(_("Uploaded iso %s to datastore") % iso_path,
LOG.debug("Uploaded iso %s to datastore", iso_path,
instance=instance)
@ -144,7 +143,7 @@ def fetch_image(context, instance, host, dc_name, ds_name, file_path,
def upload_image(context, image, instance, **kwargs):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug(_("Uploading image %s to the Glance image server") % image,
LOG.debug("Uploading image %s to the Glance image server", image,
instance=instance)
read_file_handle = read_write_util.VMwareHTTPReadFile(
kwargs.get("host"),
@ -174,7 +173,7 @@ def upload_image(context, image, instance, **kwargs):
start_transfer(context, read_file_handle, file_size,
image_service=image_service,
image_id=image_id, image_meta=image_metadata)
LOG.debug(_("Uploaded image %s to the Glance image server") % image,
LOG.debug("Uploaded image %s to the Glance image server", image,
instance=instance)
@ -184,11 +183,11 @@ def get_vmdk_size_and_properties(context, image, instance):
geometry of the disk created depends on the size.
"""
LOG.debug(_("Getting image size for the image %s") % image,
LOG.debug("Getting image size for the image %s", image,
instance=instance)
(image_service, image_id) = glance.get_remote_image_service(context, image)
meta_data = image_service.show(context, image_id)
size, properties = meta_data["size"], meta_data["properties"]
LOG.debug(_("Got image size of %(size)s for the image %(image)s"),
LOG.debug("Got image size of %(size)s for the image %(image)s",
{'size': size, 'image': image}, instance=instance)
return size, properties

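The ISO upload above streams in fixed 64 KiB blocks (block_size = 0x10000); stripped of the VMware plumbing, the transfer loop is just:

    def copy_stream(read_fh, write_fh, block_size=0x10000):
        # Read and write in 64 KiB chunks until EOF.
        data = read_fh.read(block_size)
        while data:
            write_fh.write(data)
            data = read_fh.read(block_size)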
View File

@ -17,7 +17,6 @@ Helper methods for operations related to the management of volumes,
and storage repositories
"""
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@ -152,10 +151,10 @@ def rescan_iscsi_hba(session, cluster=None, target_portal=None):
break
else:
return
LOG.debug(_("Rescanning HBA %s") % hba_device)
LOG.debug("Rescanning HBA %s", hba_device)
session._call_method(session._get_vim(), "RescanHba", storage_system_mor,
hbaDevice=hba_device)
LOG.debug(_("Rescanned HBA %s ") % hba_device)
LOG.debug("Rescanned HBA %s ", hba_device)
def _add_iscsi_send_target_host(session, storage_system_mor, hba_device,
@ -164,7 +163,7 @@ def _add_iscsi_send_target_host(session, storage_system_mor, hba_device,
client_factory = session._get_vim().client.factory
send_tgt = client_factory.create('ns0:HostInternetScsiHbaSendTarget')
(send_tgt.address, send_tgt.port) = target_portal.split(':')
LOG.debug(_("Adding iSCSI host %s to send targets"), send_tgt.address)
LOG.debug("Adding iSCSI host %s to send targets", send_tgt.address)
session._call_method(
session._get_vim(), "AddInternetScsiSendTargets", storage_system_mor,
iScsiHbaDevice=hba_device, targets=[send_tgt])

View File

@ -62,9 +62,9 @@ class VMwareVolumeOps(object):
if controller_spec:
vmdk_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
"disk %(vmdk_path)s or device %(device_name)s with type "
"%(disk_type)s"),
LOG.debug("Reconfiguring VM instance %(instance_name)s to attach "
"disk %(vmdk_path)s or device %(device_name)s with type "
"%(disk_type)s",
{'instance_name': instance_name, 'vmdk_path': vmdk_path,
'device_name': device_name, 'disk_type': disk_type},
instance=instance)
@ -73,9 +73,9 @@ class VMwareVolumeOps(object):
"ReconfigVM_Task", vm_ref,
spec=vmdk_attach_config_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
"disk %(vmdk_path)s or device %(device_name)s with type "
"%(disk_type)s"),
LOG.debug("Reconfigured VM instance %(instance_name)s to attach "
"disk %(vmdk_path)s or device %(device_name)s with type "
"%(disk_type)s",
{'instance_name': instance_name, 'vmdk_path': vmdk_path,
'device_name': device_name, 'disk_type': disk_type},
instance=instance)
@ -118,8 +118,8 @@ class VMwareVolumeOps(object):
vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(
client_factory, device, destroy_disk)
disk_key = device.key
LOG.debug(_("Reconfiguring VM instance %(instance_name)s to detach "
"disk %(disk_key)s"),
LOG.debug("Reconfiguring VM instance %(instance_name)s to detach "
"disk %(disk_key)s",
{'instance_name': instance_name, 'disk_key': disk_key},
instance=instance)
reconfig_task = self._session._call_method(
@ -127,8 +127,8 @@ class VMwareVolumeOps(object):
"ReconfigVM_Task", vm_ref,
spec=vmdk_detach_config_spec)
self._session._wait_for_task(reconfig_task)
LOG.debug(_("Reconfigured VM instance %(instance_name)s to detach "
"disk %(disk_key)s"),
LOG.debug("Reconfigured VM instance %(instance_name)s to detach "
"disk %(disk_key)s",
{'instance_name': instance_name, 'disk_key': disk_key},
instance=instance)
@ -136,13 +136,13 @@ class VMwareVolumeOps(object):
"""Discover iSCSI targets."""
target_portal = data['target_portal']
target_iqn = data['target_iqn']
LOG.debug(_("Discovering iSCSI target %(target_iqn)s from "
"%(target_portal)s."),
LOG.debug("Discovering iSCSI target %(target_iqn)s from "
"%(target_portal)s.",
{'target_iqn': target_iqn, 'target_portal': target_portal})
device_name, uuid = volume_util.find_st(self._session, data,
self._cluster)
if device_name:
LOG.debug(_("Storage target found. No need to discover"))
LOG.debug("Storage target found. No need to discover")
return (device_name, uuid)
# Rescan iSCSI HBA with iscsi target host
volume_util.rescan_iscsi_hba(self._session, self._cluster,
@ -151,13 +151,13 @@ class VMwareVolumeOps(object):
device_name, uuid = volume_util.find_st(self._session, data,
self._cluster)
if device_name:
LOG.debug(_("Discovered iSCSI target %(target_iqn)s from "
"%(target_portal)s."),
LOG.debug("Discovered iSCSI target %(target_iqn)s from "
"%(target_portal)s.",
{'target_iqn': target_iqn,
'target_portal': target_portal})
else:
LOG.debug(_("Unable to discovered iSCSI target %(target_iqn)s "
"from %(target_portal)s."),
LOG.debug("Unable to discovered iSCSI target %(target_iqn)s "
"from %(target_portal)s.",
{'target_iqn': target_iqn,
'target_portal': target_portal})
return (device_name, uuid)
@ -224,8 +224,8 @@ class VMwareVolumeOps(object):
instance_name = instance['name']
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Attach Volume to VM
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s, "
"%(mountpoint)s"),
LOG.debug("Attach_volume: %(connection_info)s, %(instance_name)s, "
"%(mountpoint)s",
{'connection_info': connection_info,
'instance_name': instance_name,
'mountpoint': mountpoint},
@ -256,8 +256,8 @@ class VMwareVolumeOps(object):
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
LOG.debug(_("Volume attach. Driver type: %s"), driver_type,
instance=instance)
LOG.debug("Volume attach. Driver type: %s", driver_type,
instance=instance)
if driver_type == 'vmdk':
self._attach_volume_vmdk(connection_info, instance, mountpoint)
elif driver_type == 'iscsi':
@ -327,9 +327,9 @@ class VMwareVolumeOps(object):
if original_device_path == current_device_path:
# The volume is not moved from its original location.
# No consolidation is required.
LOG.debug(_("The volume has not been displaced from "
"its original location: %s. No consolidation "
"needed."), current_device_path)
LOG.debug("The volume has not been displaced from "
"its original location: %s. No consolidation "
"needed.", current_device_path)
return
# The volume has been moved from its original location.
@ -380,7 +380,7 @@ class VMwareVolumeOps(object):
instance_name = instance['name']
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Detach Volume from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"),
LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
{'mountpoint': mountpoint, 'instance_name': instance_name},
instance=instance)
data = connection_info['data']
@ -402,7 +402,7 @@ class VMwareVolumeOps(object):
instance_name = instance['name']
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Detach Volume from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"),
LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
{'mountpoint': mountpoint, 'instance_name': instance_name},
instance=instance)
data = connection_info['data']
@ -429,8 +429,8 @@ class VMwareVolumeOps(object):
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
LOG.debug(_("Volume detach. Driver type: %s"), driver_type,
instance=instance)
LOG.debug("Volume detach. Driver type: %s", driver_type,
instance=instance)
if driver_type == 'vmdk':
self._detach_volume_vmdk(connection_info, instance, mountpoint)
elif driver_type == 'iscsi':
@ -442,7 +442,7 @@ class VMwareVolumeOps(object):
datastore):
"""Attach a root volume to the VM instance."""
driver_type = connection_info['driver_volume_type']
LOG.debug(_("Root volume attach. Driver type: %s"), driver_type,
LOG.debug("Root volume attach. Driver type: %s", driver_type,
instance=instance)
if self._vc_support and driver_type == 'vmdk':
vm_ref = vm_util.get_vm_ref(self._session, instance)
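
Many of the volumeops messages above keep %(name)s placeholders and pass a single dict as the log argument. A small sketch of that mapping behaviour (standard logging semantics; the values are illustrative only):

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

# When exactly one dict argument is supplied, logging uses it as the
# %-style mapping, so named placeholders resolve without an eager '%'.
LOG.debug("Volume attach. Driver type: %s", 'vmdk')
LOG.debug("Attach_volume: %(connection_info)s, %(instance_name)s",
          {'connection_info': {'driver_volume_type': 'vmdk'},
           'instance_name': 'instance-0001'})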

View File

@ -154,8 +154,8 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
instance=instance)
raise exception.AgentError(method=method)
LOG.debug(_('The agent call to %(method)s was successful: '
'%(ret)r. args=%(args)r'),
LOG.debug('The agent call to %(method)s was successful: '
'%(ret)r. args=%(args)r',
{'method': method, 'ret': ret, 'args': args},
instance=instance)
@ -188,7 +188,7 @@ class XenAPIBasedAgent(object):
compute_utils.add_instance_fault_from_exc(
ctxt, self.instance, error, exc_info=exc_info)
except Exception:
LOG.debug(_("Error setting instance fault."), exc_info=True)
LOG.debug("Error setting instance fault.", exc_info=True)
def _call_agent(self, method, addl_args=None, timeout=None,
success_codes=None, ignore_errors=True):
@ -202,7 +202,7 @@ class XenAPIBasedAgent(object):
raise
def get_version(self):
LOG.debug(_('Querying agent version'), instance=self.instance)
LOG.debug('Querying agent version', instance=self.instance)
# The agent can be slow to start for a variety of reasons. On Windows,
# it will generally perform a setup process on first boot that can
@ -226,11 +226,11 @@ class XenAPIBasedAgent(object):
ctxt, 'xen', self.instance['os_type'],
self.instance['architecture'])
if agent_build:
LOG.debug(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
LOG.debug('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s', agent_build)
else:
LOG.debug(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
LOG.debug('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s', {
'hypervisor': 'xen',
'os': self.instance['os_type'],
'architecture': self.instance['architecture']})
@ -240,11 +240,11 @@ class XenAPIBasedAgent(object):
agent_build = self._get_expected_build()
if version and agent_build and \
is_upgrade_required(version, agent_build['version']):
LOG.debug(_('Updating agent to %s'), agent_build['version'],
LOG.debug('Updating agent to %s', agent_build['version'],
instance=self.instance)
self._perform_update(agent_build)
else:
LOG.debug(_('Skipping agent update.'), instance=self.instance)
LOG.debug('Skipping agent update.', instance=self.instance)
def _perform_update(self, agent_build):
args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']}
@ -283,7 +283,7 @@ class XenAPIBasedAgent(object):
We're using a simple Diffie-Hellman class instead of a more advanced
library (such as M2Crypto) for compatibility with the agent code.
"""
LOG.debug(_('Setting admin password'), instance=self.instance)
LOG.debug('Setting admin password', instance=self.instance)
try:
dh = self._exchange_key_with_agent()
@ -305,12 +305,12 @@ class XenAPIBasedAgent(object):
return
if self.instance['os_type'] == 'windows':
LOG.debug(_("Skipping setting of ssh key for Windows."),
LOG.debug("Skipping setting of ssh key for Windows.",
instance=self.instance)
return
if self._skip_ssh_key_inject():
LOG.debug(_("Skipping agent ssh key injection for this image."),
LOG.debug("Skipping agent ssh key injection for this image.",
instance=self.instance)
return
@ -327,14 +327,14 @@ class XenAPIBasedAgent(object):
def inject_files(self, injected_files):
if self._skip_inject_files_at_boot():
LOG.debug(_("Skipping agent file injection for this image."),
LOG.debug("Skipping agent file injection for this image.",
instance=self.instance)
else:
for path, contents in injected_files:
self.inject_file(path, contents)
def inject_file(self, path, contents):
LOG.debug(_('Injecting file path: %r'), path, instance=self.instance)
LOG.debug('Injecting file path: %r', path, instance=self.instance)
# Files/paths must be base64-encoded for transmission to agent
b64_path = base64.b64encode(path)
@ -344,7 +344,7 @@ class XenAPIBasedAgent(object):
return self._call_agent('inject_file', args)
def resetnetwork(self):
LOG.debug(_('Resetting network'), instance=self.instance)
LOG.debug('Resetting network', instance=self.instance)
#NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success
return self._call_agent('resetnetwork',
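
The set_admin_password docstring above notes that a simple Diffie-Hellman class is used instead of M2Crypto for compatibility with the agent. A minimal sketch of such an exchange; the modulus and generator here (the Mersenne prime 2**107 - 1 with base 5) are assumptions about the agent protocol, not taken from this diff:

import random

PRIME = 2 ** 107 - 1   # assumed protocol modulus
BASE = 5               # assumed generator

class SimpleDH(object):
    def __init__(self):
        self._private = random.SystemRandom().getrandbits(96)
        self.public = pow(BASE, self._private, PRIME)

    def compute_shared(self, other_public):
        # both sides derive the same value: (g^a)^b == (g^b)^a (mod p)
        return pow(other_public, self._private, PRIME)

host, guest = SimpleDH(), SimpleDH()
assert host.compute_shared(guest.public) == guest.compute_shared(host.public)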

View File

@ -221,8 +221,8 @@ class XenAPISession(object):
if callback:
callback_result = callback(kwargs)
msg = _('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, '
'callback_result: %(callback_result)s')
msg = ('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, '
'callback_result: %(callback_result)s')
LOG.debug(msg,
{'plugin': plugin, 'fn': fn, 'attempt': attempt,
'attempts': attempts,
@ -247,11 +247,11 @@ class XenAPISession(object):
def _is_retryable_exception(self, exc, fn):
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug(_("RetryableError, so retrying %(fn)s"), {'fn': fn},
LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
elif "signal" in method:
LOG.debug(_("Error due to a signal, retrying %(fn)s"), {'fn': fn},
LOG.debug("Error due to a signal, retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
else:
@ -269,7 +269,7 @@ class XenAPISession(object):
try:
return func(*args, **kwargs)
except self.XenAPI.Failure as exc:
LOG.debug(_("Got exception: %s"), exc)
LOG.debug("Got exception: %s", exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
@ -283,7 +283,7 @@ class XenAPISession(object):
else:
raise
except xmlrpclib.ProtocolError as exc:
LOG.debug(_("Got exception: %s"), exc)
LOG.debug("Got exception: %s", exc)
raise
def get_rec(self, record_type, ref):
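
The session changes above keep exc_info=True on the retry paths so the traceback still lands in the debug log. A compact sketch of that shape; the retryable check is a stand-in, not the real XenAPI failure format:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

def call_with_retry(fn, attempts=3):
    for attempt in range(1, attempts + 1):
        try:
            return fn()
        except RuntimeError as exc:
            if 'RetryableError' not in str(exc):
                raise
            # exc_info=True attaches the active traceback to the record
            LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn},
                      exc_info=True)
    raise RuntimeError('gave up after %d attempts' % attempts)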

View File

@ -77,7 +77,7 @@ LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
text = msg or ""
content = pprint.pformat(_db_content)
LOG.debug(_("%(text)s: _db_content => %(content)s"),
LOG.debug("%(text)s: _db_content => %(content)s",
{'text': text, 'content': content})
@ -807,7 +807,7 @@ class SessionBase(object):
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug(_('Raising NotImplemented'))
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
@ -842,16 +842,16 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
LOG.debug(_('Calling %(name)s %(impl)s'),
LOG.debug('Calling %(name)s %(impl)s',
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug(_('Calling getter %s'), name)
LOG.debug('Calling getter %s', name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug(_('Calling setter %s'), name)
LOG.debug('Calling setter %s', name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
@ -915,7 +915,7 @@ class SessionBase(object):
else:
raise Failure(['HANDLE_INVALID', cls, ref])
LOG.debug(_('Raising NotImplemented'))
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
@ -934,7 +934,7 @@ class SessionBase(object):
_db_content[cls][ref][field] = val
return
LOG.debug(_('Raising NotImplemented'))
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
@ -1002,7 +1002,7 @@ class SessionBase(object):
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug(_('Raising NotImplemented'))
LOG.debug('Raising NotImplemented')
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):

View File

@ -16,7 +16,6 @@
# under the License.
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt import firewall
@ -72,7 +71,7 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
ipv6_rules = []
rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
LOG.debug('Adding provider rule: %s', rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules

View File

@ -229,7 +229,7 @@ class HostState(object):
"""Since under Xenserver, a compute node runs on a given host,
we can get host status information using xenapi.
"""
LOG.debug(_("Updating host stats"))
LOG.debug("Updating host stats")
data = call_xenhost(self._session, "host_data", {})
if data:
sr_ref = vm_utils.scan_default_sr(self._session)

View File

@ -103,8 +103,8 @@ class BittorrentStore(object):
" found. Failing."))
else:
ep = matches[0]
LOG.debug(_("Loading torrent URL fetcher from entry points"
" %(ep)s"), {'ep': ep})
LOG.debug("Loading torrent URL fetcher from entry points"
" %(ep)s", {'ep': ep})
fn = ep.load()
return fn

View File

@ -331,7 +331,7 @@ def create_vm(session, instance, name_label, kernel, ramdisk,
rec['platform']['device_id'] = device_id
vm_ref = session.VM.create(rec)
LOG.debug(_('Created VM'), instance=instance)
LOG.debug('Created VM', instance=instance)
return vm_ref
@ -343,7 +343,7 @@ def destroy_vm(session, instance, vm_ref):
LOG.exception(exc)
return
LOG.debug(_("VM destroyed"), instance=instance)
LOG.debug("VM destroyed", instance=instance)
def clean_shutdown_vm(session, instance, vm_ref):
@ -352,7 +352,7 @@ def clean_shutdown_vm(session, instance, vm_ref):
instance=instance)
return True
LOG.debug(_("Shutting down VM (cleanly)"), instance=instance)
LOG.debug("Shutting down VM (cleanly)", instance=instance)
try:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure as exc:
@ -367,7 +367,7 @@ def hard_shutdown_vm(session, instance, vm_ref):
instance=instance)
return True
LOG.debug(_("Shutting down VM (hard)"), instance=instance)
LOG.debug("Shutting down VM (hard)", instance=instance)
try:
session.call_xenapi('VM.hard_shutdown', vm_ref)
except session.XenAPI.Failure as exc:
@ -478,12 +478,12 @@ def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... '),
LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ',
{'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.'),
LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.',
{'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
if osvol:
# set osvol=True in other-config to indicate this is an
@ -507,8 +507,9 @@ def destroy_vdi(session, vdi_ref):
try:
session.call_xenapi('VDI.destroy', vdi_ref)
except session.XenAPI.Failure:
msg = _("Unable to destroy VDI %s") % vdi_ref
msg = "Unable to destroy VDI %s" % vdi_ref
LOG.debug(msg, exc_info=True)
msg = _("Unable to destroy VDI %s") % vdi_ref
LOG.error(msg)
raise exception.StorageError(reason=msg)
@ -519,7 +520,7 @@ def safe_destroy_vdis(session, vdi_refs):
try:
destroy_vdi(session, vdi_ref)
except exception.StorageError:
msg = _("Ignoring error while destroying VDI: %s") % vdi_ref
msg = "Ignoring error while destroying VDI: %s" % vdi_ref
LOG.debug(msg)
@ -538,8 +539,8 @@ def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
'other_config': _get_vdi_other_config(disk_type, instance=instance),
'sm_config': {},
'tags': []})
LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'),
LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.',
{'vdi_ref': vdi_ref, 'name_label': name_label,
'virtual_size': virtual_size, 'read_only': read_only,
'sr_ref': sr_ref})
@ -670,8 +671,8 @@ def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
def _clone_vdi(session, vdi_to_clone_ref):
"""Clones a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s'),
LOG.debug('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s',
{'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref})
return vdi_ref
@ -755,7 +756,7 @@ def _try_strip_base_mirror_from_vdi(session, vdi_ref):
session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
"base_mirror")
except session.XenAPI.Failure:
LOG.debug(_("Error while removing sm_config"), exc_info=True)
LOG.debug("Error while removing sm_config", exc_info=True)
def strip_base_mirror_from_vdis(session, vm_ref):
@ -779,7 +780,7 @@ def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
LOG.debug(_("Starting snapshot for VM"), instance=instance)
LOG.debug("Starting snapshot for VM", instance=instance)
# Memorize the VDI chain so we can poll for coalesce
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
@ -854,7 +855,7 @@ def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
destroyed = set()
def destroy_cached_vdi(vdi_uuid, vdi_ref):
LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'"))
LOG.debug("Destroying cached VDI '%(vdi_uuid)s'")
if not dry_run:
destroy_vdi(session, vdi_ref)
destroyed.add(vdi_uuid)
@ -946,8 +947,8 @@ def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
virtual_size = _vdi_get_virtual_size(session, vdi_ref)
new_disk_size = new_gb * units.Gi
msg = _("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size},
instance=instance)
@ -957,7 +958,7 @@ def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
_vdi_resize(session, vdi_ref, new_disk_size)
elif virtual_size == new_disk_size:
LOG.debug(_("No need to change vdi virtual size."),
LOG.debug("No need to change vdi virtual size.",
instance=instance)
else:
@ -1020,7 +1021,7 @@ def _auto_configure_disk(session, vdi_ref, new_gb):
3. The file-system on the one partition must be ext3 or ext4.
"""
if new_gb == 0:
LOG.debug(_("Skipping auto_config_disk as destination size is 0GB"))
LOG.debug("Skipping auto_config_disk as destination size is 0GB")
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
@ -1124,7 +1125,7 @@ def _generate_disk(session, instance, vm_ref, userdevice, name_label,
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Error while generating disk number: %s") % userdevice
msg = "Error while generating disk number: %s" % userdevice
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
@ -1187,9 +1188,9 @@ def generate_ephemeral(session, instance, vm_ref, first_userdevice,
vdi_refs.append(ref)
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.debug(_("Error when generating ephemeral disk. "
"Device: %(userdevice)s Size GB: %(size_gb)s "
"Error: %(exc)s"), {
LOG.debug("Error when generating ephemeral disk. "
"Device: %(userdevice)s Size GB: %(size_gb)s "
"Error: %(exc)s", {
'userdevice': userdevice,
'size_gb': size_gb,
'exc': exc})
@ -1232,7 +1233,7 @@ def generate_configdrive(session, instance, vm_ref, userdevice,
read_only=True)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Error while generating config drive")
msg = "Error while generating config drive"
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
@ -1284,7 +1285,7 @@ def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
if ramdisk:
args['ramdisk-file'] = ramdisk
if args:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
LOG.debug("Removing kernel/ramdisk files from dom0",
instance=instance)
session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
@ -1418,8 +1419,8 @@ def _fetch_image(context, session, instance, name_label, image_id, image_type):
for vdi_type, vdi in vdis.iteritems():
vdi_uuid = vdi['uuid']
LOG.debug(_("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'"),
LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'",
{'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid},
instance=instance)
@ -1484,7 +1485,7 @@ def _fetch_vhd_image(context, session, instance, image_id):
Returns: A list of dictionaries that describe VDIs
"""
LOG.debug(_("Asking xapi to fetch vhd image %s"), image_id,
LOG.debug("Asking xapi to fetch vhd image %s", image_id,
instance=instance)
handler = _choose_download_handler(context, instance)
@ -1516,7 +1517,7 @@ def _fetch_vhd_image(context, session, instance, image_id):
_check_vdi_size(context, session, instance, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Error while checking vdi size")
msg = "Error while checking vdi size"
LOG.debug(msg, instance=instance, exc_info=True)
for vdi in vdis.values():
vdi_uuid = vdi['uuid']
@ -1537,8 +1538,8 @@ def _get_vdi_chain_size(session, vdi_uuid):
for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d'),
LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d',
{'cur_vdi_uuid': cur_vdi_uuid,
'vdi_size_bytes': vdi_size_bytes})
size_bytes += vdi_size_bytes
@ -1579,7 +1580,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
image_type_str = ImageType.to_string(image_type)
LOG.debug(_("Fetching image %(image_id)s, type %(image_type_str)s"),
LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s",
{'image_id': image_id, 'image_type_str': image_type_str},
instance=instance)
@ -1596,7 +1597,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
virtual_size = image.get_size()
vdi_size = virtual_size
LOG.debug(_("Size for image %(image_id)s: %(virtual_size)d"),
LOG.debug("Size for image %(image_id)s: %(virtual_size)d",
{'image_id': image_id, 'virtual_size': virtual_size},
instance=instance)
if image_type == ImageType.DISK:
@ -1625,7 +1626,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
# We need to invoke a plugin for copying the
# content of the VDI into the proper path.
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"),
LOG.debug("Copying VDI %s to /boot/guest on dom0",
vdi_ref, instance=instance)
args = {}
@ -1639,7 +1640,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
# Remove the VDI as it is not needed anymore.
destroy_vdi(session, vdi_ref)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref,
LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref,
instance=instance)
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=None, file=filename)}
@ -1692,7 +1693,7 @@ def determine_disk_image_type(image_meta):
'image_type_str': ImageType.to_string(image_type),
'image_ref': image_ref
}
LOG.debug(_("Detected %(image_type_str)s format for image %(image_ref)s"),
LOG.debug("Detected %(image_type_str)s format for image %(image_ref)s",
params)
return image_type
@ -1743,7 +1744,7 @@ def lookup_vm_vdis(session, vm_ref):
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
# Test valid VDI
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
LOG.debug(_('VDI %s is still available'), vdi_uuid)
LOG.debug('VDI %s is still available', vdi_uuid)
vbd_other_config = session.call_xenapi("VBD.get_other_config",
vbd_ref)
if not vbd_other_config.get('osvol'):
@ -1875,7 +1876,7 @@ def _scan_sr(session, sr_ref=None, max_attempts=4):
# in host.update_status starts racing the sr.scan after a plugin call.
@utils.synchronized('sr-scan-' + sr_ref)
def do_scan(sr_ref):
LOG.debug(_("Scanning SR %s"), sr_ref)
LOG.debug("Scanning SR %s", sr_ref)
attempt = 1
while True:
@ -1961,30 +1962,30 @@ def _find_iso_sr(session):
"""Return the storage repository to hold ISO images."""
host = session.host_ref
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug(_("ISO: looking at SR %s"), sr_rec)
LOG.debug("ISO: looking at SR %s", sr_rec)
if not sr_rec['content_type'] == 'iso':
LOG.debug(_("ISO: not iso content"))
LOG.debug("ISO: not iso content")
continue
if 'i18n-key' not in sr_rec['other_config']:
LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
LOG.debug("ISO: iso content_type, no 'i18n-key' key")
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
LOG.debug(_("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'"))
LOG.debug("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'")
continue
LOG.debug(_("ISO: SR MATCHing our criteria"))
LOG.debug("ISO: SR MATCHing our criteria")
for pbd_ref in sr_rec['PBDs']:
LOG.debug(_("ISO: ISO, looking to see if it is host local"))
LOG.debug("ISO: ISO, looking to see if it is host local")
pbd_rec = session.get_rec('PBD', pbd_ref)
if not pbd_rec:
LOG.debug(_("ISO: PBD %s disappeared"), pbd_ref)
LOG.debug("ISO: PBD %s disappeared", pbd_ref)
continue
pbd_rec_host = pbd_rec['host']
LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s"),
LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s",
{'pbd_rec': pbd_rec, 'host': host})
if pbd_rec_host == host:
LOG.debug(_("ISO: SR with local PBD"))
LOG.debug("ISO: SR with local PBD")
return sr_ref
return None
@ -2041,7 +2042,7 @@ def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None):
parent_uuid = vdi_rec['sm_config']['vhd-parent']
vdi_uuid = vdi_rec['uuid']
LOG.debug(_('VHD %(vdi_uuid)s has parent %(parent_uuid)s'),
LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s',
{'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid})
return parent_uuid
@ -2122,8 +2123,8 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
if parent_uuid and (parent_uuid not in vdi_uuid_list):
LOG.debug(_("Parent %(parent_uuid)s not yet in parent list"
" %(vdi_uuid_list)s, waiting for coalesce..."),
LOG.debug("Parent %(parent_uuid)s not yet in parent list"
" %(vdi_uuid_list)s, waiting for coalesce...",
{'parent_uuid': parent_uuid,
'vdi_uuid_list': vdi_uuid_list},
instance=instance)
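
_wait_for_vhd_coalesce above rescans the SR and re-reads the VHD parent until it shows up in the expected chain. A stripped-down poll loop with the same shape; the attempt count and sleep interval are assumptions:

import time

def wait_for_coalesce(rescan, get_parent_uuid, vdi_uuid_list,
                      attempts=20, interval=2.0):
    for _ in range(attempts):
        rescan()
        parent_uuid = get_parent_uuid()
        if not parent_uuid or parent_uuid in vdi_uuid_list:
            return parent_uuid
        time.sleep(interval)
    raise RuntimeError('VHD coalesce did not complete')
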
@ -2202,23 +2203,23 @@ def vdi_attached_here(session, vdi_ref, read_only=False):
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
read_only=read_only, bootable=False)
try:
LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
LOG.debug('Plugging VBD %s ... ', vbd_ref)
session.VBD.plug(vbd_ref, this_vm_ref)
try:
LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
LOG.debug('Plugging VBD %s done.', vbd_ref)
orig_dev = session.call_xenapi("VBD.get_device", vbd_ref)
LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s'),
LOG.debug('VBD %(vbd_ref)s plugged as %(orig_dev)s',
{'vbd_ref': vbd_ref, 'orig_dev': orig_dev})
dev = _remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s'),
LOG.debug('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s',
{'vbd_ref': vbd_ref, 'dev': dev})
_wait_for_device(dev)
yield dev
finally:
utils.execute('sync', run_as_root=True)
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
unplug_vbd(session, vbd_ref, this_vm_ref)
finally:
try:
@ -2226,7 +2227,7 @@ def vdi_attached_here(session, vdi_ref, read_only=False):
except exception.StorageError:
# destroy_vbd() will log error
pass
LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
LOG.debug('Destroying VBD for VDI %s done.', vdi_ref)
def _get_sys_hypervisor_uuid():
@ -2268,7 +2269,7 @@ def _get_partitions(dev):
lines = [line for line in out.split('\n') if line]
partitions = []
LOG.debug(_("Partitions:"))
LOG.debug("Partitions:")
for line in lines[2:]:
line = line.rstrip(';')
num, start, end, size, fstype, name, flags = line.split(':')
@ -2276,7 +2277,7 @@ def _get_partitions(dev):
start = int(start.rstrip('s'))
end = int(end.rstrip('s'))
size = int(size.rstrip('s'))
LOG.debug(_(" %(num)s: %(fstype)s %(size)d sectors"),
LOG.debug(" %(num)s: %(fstype)s %(size)d sectors",
{'num': num, 'fstype': fstype, 'size': size})
partitions.append((num, start, size, fstype, name, flags))
@ -2302,8 +2303,8 @@ def _write_partition(session, virtual_size, dev):
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dev_path)s...'),
LOG.debug('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dev_path)s...',
{'primary_first': primary_first, 'primary_last': primary_last,
'dev_path': dev_path})
@ -2311,7 +2312,7 @@ def _write_partition(session, virtual_size, dev):
return utils.execute(*cmd, **kwargs)
_make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
LOG.debug(_('Writing partition table %s done.'), dev_path)
LOG.debug('Writing partition table %s done.', dev_path)
def _repair_filesystem(partition_path):
@ -2376,9 +2377,9 @@ def _log_progress_if_required(left, last_log_time, virtual_size):
if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS):
last_log_time = timeutils.utcnow()
complete_pct = float(virtual_size - left) / virtual_size * 100
LOG.debug(_("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy"),
LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": complete_pct, "left": left})
return last_log_time
@ -2391,8 +2392,8 @@ def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
skipped_bytes = 0
left = virtual_size
LOG.debug(_("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"),
LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d",
{'src_path': src_path, 'dst_path': dst_path,
'virtual_size': virtual_size, 'block_size': block_size})
@ -2427,8 +2428,8 @@ def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
compression_pct = float(skipped_bytes) / bytes_read * 100
LOG.debug(_("Finished sparse_copy in %(duration).2f secs, "
"%(compression_pct).2f%% reduction in size"),
LOG.debug("Finished sparse_copy in %(duration).2f secs, "
"%(compression_pct).2f%% reduction in size",
{'duration': duration, 'compression_pct': compression_pct})
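
_sparse_copy above reads the source block by block, tracking skipped bytes and logging progress, so the destination stays sparse. A minimal sketch of one way to get the effect the log lines describe; the zero-block seek and the truncate-to-size step are assumptions:

def sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
    skipped_bytes = 0
    zeroes = b'\x00' * block_size
    with open(src_path, 'rb') as src, open(dst_path, 'wb') as dst:
        left = virtual_size
        while left > 0:
            data = src.read(min(block_size, left))
            if not data:
                break
            if data == zeroes[:len(data)]:
                dst.seek(len(data), 1)     # leave a hole instead of writing
                skipped_bytes += len(data)
            else:
                dst.write(data)
            left -= len(data)
        dst.truncate(virtual_size)         # give the file its full length
    return skipped_bytes
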
@ -2659,7 +2660,7 @@ def _import_migrated_vhds(session, instance, chain_label, disk_type,
def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
ephemeral_number=0):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d",
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
chain_label = instance['uuid']

View File

@ -167,7 +167,7 @@ class VMOps(object):
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
LOG.debug(_("Importing image upload handler: %s"),
LOG.debug("Importing image upload handler: %s",
CONF.xenserver.image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenserver.image_upload_handler)
@ -302,7 +302,7 @@ class VMOps(object):
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
LOG.debug("Starting instance", instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
@ -329,7 +329,7 @@ class VMOps(object):
name_label=None, rescue=False):
if block_device_info:
LOG.debug(_("Block device information present: %s"),
LOG.debug("Block device information present: %s",
block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
@ -562,7 +562,7 @@ class VMOps(object):
image_properties = image_meta.get("properties")
device_id = vm_utils.get_vm_device_id(self._session, image_properties)
use_pv_kernel = (mode == vm_mode.XEN)
LOG.debug(_("Using PV kernel: %s"), use_pv_kernel, instance=instance)
LOG.debug("Using PV kernel: %s", use_pv_kernel, instance=instance)
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file,
use_pv_kernel, device_id)
@ -588,8 +588,8 @@ class VMOps(object):
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize root disk..."), instance=instance)
LOG.debug("Auto configuring disk, attempting to "
"resize root disk...", instance=instance)
vm_utils.try_auto_configure_disk(self._session,
root_vdi['ref'],
flavor['root_gb'])
@ -641,7 +641,7 @@ class VMOps(object):
files=files)
def _wait_for_instance_to_start(self, instance, vm_ref):
LOG.debug(_('Waiting for instance state to become running'),
LOG.debug('Waiting for instance state to become running',
instance=instance)
expiration = time.time() + CONF.xenserver.running_timeout
while time.time() < expiration:
@ -653,18 +653,18 @@ class VMOps(object):
def _configure_new_instance_with_agent(self, instance, vm_ref,
injected_files, admin_password):
if not self.agent_enabled(instance):
LOG.debug(_("Skip agent setup, not enabled."), instance=instance)
LOG.debug("Skip agent setup, not enabled.", instance=instance)
return
agent = self._get_agent(instance, vm_ref)
version = agent.get_version()
if not version:
LOG.debug(_("Skip agent setup, unable to contact agent."),
LOG.debug("Skip agent setup, unable to contact agent.",
instance=instance)
return
LOG.debug(_('Detected agent version: %s'), version, instance=instance)
LOG.debug('Detected agent version: %s', version, instance=instance)
# NOTE(johngarbutt) the agent object allows all of
# the following steps to silently fail
@ -753,7 +753,7 @@ class VMOps(object):
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
LOG.debug("Finished snapshot and upload for VM",
instance=instance)
def _get_orig_vm_name_label(self, instance):
@ -772,19 +772,19 @@ class VMOps(object):
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %d"), progress,
LOG.debug("Updating progress to %d", progress,
instance=instance)
instance.progress = progress
instance.save()
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
LOG.debug("VM was already shutdown.", instance=instance)
return
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
LOG.debug("Clean shutdown did not complete successfully, "
"trying hard shutdown.", instance=instance)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
@ -892,7 +892,7 @@ class VMOps(object):
start=1):
vm_utils.migrate_vhd(self._session, instance, vdi_uuid, dest,
sr_path, vhd_num)
LOG.debug(_("Migrated root base vhds"), instance=instance)
LOG.debug("Migrated root base vhds", instance=instance)
return active_root_vdi_uuid
def _process_ephemeral_chain_recursive(ephemeral_chains,
@ -908,7 +908,7 @@ class VMOps(object):
# If we get here, we have snapshotted and migrated
# all the ephemeral disks, so its time to power down
# and complete the migration of the diffs since the snapshot
LOG.debug(_("Migrated all base vhds."), instance=instance)
LOG.debug("Migrated all base vhds.", instance=instance)
return power_down_and_transfer_leaf_vhds(
active_root_vdi_uuid,
active_vdi_uuids)
@ -942,7 +942,7 @@ class VMOps(object):
dest, sr_path, seq_num,
ephemeral_disk_number)
LOG.debug(_("Read-only migrated for disk: %s"), userdevice,
LOG.debug("Read-only migrated for disk: %s", userdevice,
instance=instance)
# This is recursive to simplify the taking and cleaning up
# of all the ephemeral disk snapshots
@ -1077,7 +1077,7 @@ class VMOps(object):
root_vdi = vdis.get('root')
if new_root_gb and root_vdi:
if root_vdi.get('osvol', False): # Don't resize root volumes.
LOG.debug(_("Not resizing the root volume."),
LOG.debug("Not resizing the root volume.",
instance=instance)
else:
vdi_ref = root_vdi['ref']
@ -1262,7 +1262,7 @@ class VMOps(object):
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
LOG.debug("Destroying VDIs", instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
@ -1289,8 +1289,8 @@ class VMOps(object):
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
LOG.debug("Using RAW or VHD, skipping kernel and ramdisk "
"deletion", instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
@ -1304,7 +1304,7 @@ class VMOps(object):
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, instance,
kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
LOG.debug("kernel/ramdisk files removed", instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
@ -1653,7 +1653,7 @@ class VMOps(object):
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
LOG.debug("Injecting network info to xenstore", instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
def update_nwinfo():
@ -1676,7 +1676,7 @@ class VMOps(object):
def _create_vifs(self, instance, vm_ref, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
LOG.debug("Creating vifs", instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_domid", vm_ref)
@ -1685,10 +1685,10 @@ class VMOps(object):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %s'),
LOG.debug('Creating VIF for network %s',
network_ref, instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
LOG.debug('Created VIF %(vif_ref)s, network %(network_ref)s',
{'vif_ref': vif_ref, 'network_ref': network_ref},
instance=instance)
@ -1723,7 +1723,7 @@ class VMOps(object):
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname (%s) into xenstore"), hostname,
LOG.debug("Injecting hostname (%s) into xenstore", hostname,
instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
@ -1733,7 +1733,7 @@ class VMOps(object):
update_hostname()
def _remove_hostname(self, instance, vm_ref):
LOG.debug(_("Removing hostname from xenstore"), instance=instance)
LOG.debug("Removing hostname from xenstore", instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
def update_hostname():

View File

@ -51,17 +51,17 @@ def _handle_sr_params(params):
def introduce_sr(session, sr_uuid, label, params):
LOG.debug(_('Introducing SR %s'), label)
LOG.debug('Introducing SR %s', label)
sr_type, sr_desc = _handle_sr_params(params)
sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, sr_desc,
sr_type, '', False, params)
LOG.debug(_('Creating PBD for SR'))
LOG.debug('Creating PBD for SR')
pbd_ref = create_pbd(session, sr_ref, params)
LOG.debug(_('Plugging SR'))
LOG.debug('Plugging SR')
session.call_xenapi("PBD.plug", pbd_ref)
session.call_xenapi("SR.scan", sr_ref)
@ -70,7 +70,7 @@ def introduce_sr(session, sr_uuid, label, params):
def forget_sr(session, sr_ref):
"""Forgets the storage repository without destroying the VDIs within."""
LOG.debug(_('Forgetting SR...'))
LOG.debug('Forgetting SR...')
_unplug_pbds(session, sr_ref)
session.call_xenapi("SR.forget", sr_ref)
@ -248,8 +248,8 @@ def _parse_volume_info(connection_data):
"port": target_port,
"iqn": target_iqn
}
LOG.debug(_('(vol_id,host,port,iqn): '
'(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)'), log_params)
LOG.debug('(vol_id,host,port,iqn): '
'(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)', log_params)
if (volume_id is None or
target_host is None or

View File

@ -90,7 +90,7 @@ class VolumeOps(object):
return (sr_ref, sr_uuid)
def _connect_hypervisor_to_volume(self, sr_ref, connection_data):
LOG.debug(_("Connect volume to hypervisor: %s"), connection_data)
LOG.debug("Connect volume to hypervisor: %s", connection_data)
if 'vdi_uuid' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
@ -120,7 +120,7 @@ class VolumeOps(object):
# NOTE(johngarbutt) can only call VBD.plug on a running vm
running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
if running:
LOG.debug(_("Plugging VBD: %s") % vbd_ref)
LOG.debug("Plugging VBD: %s", vbd_ref)
self._session.VBD.plug(vbd_ref, vm_ref)
LOG.info(_('Dev %(dev_number)s attached to'
@ -129,7 +129,7 @@ class VolumeOps(object):
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"),
LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
{'instance_name': instance_name, 'mountpoint': mountpoint})
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)