Remove context object from oslo.log calls

Removed the context object from logging calls: Nova uses oslo.context's
RequestContext, which means the context is already in scope when
logging and oslo.log can pick it up automatically. Added a hacking
check (N353) to warn if someone passes a context to a logging
statement in the future.

Change-Id: I5aaa869f2e6155964827e659d18e2bcaad9d866b
Closes-Bug: #1500896
Author: Sivasathurappan Radhakrishnan
Date:   2016-05-16 22:09:44 +00:00
Parent: 69784047ea
Commit: b3d58ed718

8 changed files with 111 additions and 79 deletions
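The change relies on oslo.log resolving the request context from
thread-local storage instead of an explicit kwarg. A minimal sketch of
that behavior (illustrative only, not part of this commit;
handle_request is a hypothetical caller):

    # Sketch: why the context= kwarg is redundant under oslo.context.
    from oslo_context import context as o_context
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def handle_request():
        # RequestContext stores itself in thread-local storage on
        # construction (overwrite=True by default), where oslo.log's
        # ContextFormatter can find it when no context is passed.
        ctxt = o_context.RequestContext()

        # Before this commit: LOG.info('Rebooting', context=ctxt)
        # After: the formatter picks up the same thread-local context.
        LOG.info('Rebooting instance')
        return ctxt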

HACKING.rst

@ -63,6 +63,7 @@ Nova Specific Commandments
- [N350] Policy registration should be in the central location ``nova/policies/``
- [N351] Do not use the oslo_policy.policy.Enforcer.enforce() method.
- [N352] LOG.warn is deprecated. Enforce use of LOG.warning.
- [N353] Validate that a context object is not passed in logging calls.
Creating Unit Tests
-------------------

nova/compute/api.py

@ -1827,14 +1827,14 @@ class API(base.Base):
'id: %(id)s, status: %(status)s'),
{'id': migration.id,
'status': migration.status},
-context=context, instance=instance)
+instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_LI('Instance may have been confirmed during delete'),
-context=context, instance=instance)
+instance=instance)
return
src_host = migration.source_compute
@ -1851,8 +1851,7 @@ class API(base.Base):
deltas = compute_utils.downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_LI('Migration %s may have been confirmed during '
-'delete'),
-migration.id, context=context, instance=instance)
+'delete'), migration.id, instance=instance)
return
quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
@ -2500,7 +2499,7 @@ class API(base.Base):
if compute_utils.is_volume_backed_instance(context, instance):
LOG.info(_LI("It's not supported to backup volume backed "
"instance."), context=context, instance=instance)
"instance."), instance=instance)
raise exception.InvalidRequest()
else:
image_meta = self._create_image(context, instance,
@ -2646,7 +2645,7 @@ class API(base.Base):
else:
LOG.info(_LI('Skipping quiescing instance: '
'%(reason)s.'), {'reason': err},
-context=context, instance=instance)
+instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@ -3312,7 +3311,7 @@ class API(base.Base):
return
context = context.elevated()
-LOG.debug('Locking', context=context, instance=instance)
+LOG.debug('Locking', instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@ -3328,7 +3327,7 @@ class API(base.Base):
def unlock(self, context, instance):
"""Unlock the given instance."""
context = context.elevated()
-LOG.debug('Unlocking', context=context, instance=instance)
+LOG.debug('Unlocking', instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@ -4443,7 +4442,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.info(_LI("Create Security Group %s"), name, context=context)
LOG.info(_LI("Create Security Group %s"), name)
try:
self.ensure_default(context)
@ -4551,8 +4550,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
LOG.exception(_LE("Failed to update usages deallocating "
"security group"))
LOG.info(_LI("Delete security group %s"), security_group['name'],
context=context)
LOG.info(_LI("Delete security group %s"), security_group['name'])
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations

nova/compute/manager.py

@ -1746,8 +1746,7 @@ class ComputeManager(manager.Manager):
node=None, limits=None):
try:
-LOG.debug('Starting instance...', context=context,
-instance=instance)
+LOG.debug('Starting instance...', instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
@ -2170,8 +2169,7 @@ class ComputeManager(manager.Manager):
trying to teardown networking
"""
context = context.elevated()
-LOG.info(_LI('Terminating instance'),
-context=context, instance=instance)
+LOG.info(_LI('Terminating instance'), instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
@ -2653,8 +2651,7 @@ class ComputeManager(manager.Manager):
"""
context = context.elevated()
LOG.info(_LI("Rebuilding instance"), context=context,
instance=instance)
LOG.info(_LI("Rebuilding instance"), instance=instance)
if scheduled_node is not None:
rt = self._get_resource_tracker(scheduled_node)
rebuild_claim = rt.rebuild_claim
@ -2909,7 +2906,7 @@ class ComputeManager(manager.Manager):
task_states.REBOOT_PENDING_HARD,
task_states.REBOOT_STARTED_HARD)
context = context.elevated()
LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
LOG.info(_LI("Rebooting instance"), instance=instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
@ -2927,7 +2924,7 @@ class ComputeManager(manager.Manager):
LOG.warning(_LW('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
-context=context, instance=instance)
+instance=instance)
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
@ -2961,7 +2958,7 @@ class ComputeManager(manager.Manager):
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_LW('Reboot failed but instance is running'),
-context=context, instance=instance)
+instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
@ -2969,7 +2966,7 @@ class ComputeManager(manager.Manager):
ctxt.reraise = False
else:
LOG.error(_LE('Cannot reboot instance: %s'), error,
-context=context, instance=instance)
+instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
@ -2981,7 +2978,7 @@ class ComputeManager(manager.Manager):
instance.save()
except exception.InstanceNotFound:
LOG.warning(_LW("Instance disappeared during reboot"),
-context=context, instance=instance)
+instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
@ -3045,8 +3042,7 @@ class ComputeManager(manager.Manager):
try:
instance.save()
-LOG.info(_LI('instance snapshotting'), context=context,
-instance=instance)
+LOG.info(_LI('instance snapshotting'), instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
@ -3275,7 +3271,7 @@ class ComputeManager(manager.Manager):
def rescue_instance(self, context, instance, rescue_password,
rescue_image_ref, clean_shutdown):
context = context.elevated()
-LOG.info(_LI('Rescuing'), context=context, instance=instance)
+LOG.info(_LI('Rescuing'), instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
@ -3324,7 +3320,7 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault
def unrescue_instance(self, context, instance):
context = context.elevated()
-LOG.info(_LI('Unrescuing'), context=context, instance=instance)
+LOG.info(_LI('Unrescuing'), instance=instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
@ -3365,7 +3361,7 @@ class ComputeManager(manager.Manager):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug("Going to confirm migration %s", migration_id,
-context=context, instance=instance)
+instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
@ -3373,13 +3369,13 @@ class ComputeManager(manager.Manager):
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_LE("Migration %s is not found during confirmation"),
-migration_id, context=context, instance=instance)
+migration_id, instance=instance)
quotas.rollback()
return
if migration.status == 'confirmed':
LOG.info(_LI("Migration %s is already confirmed"),
-migration_id, context=context, instance=instance)
+migration_id, instance=instance)
quotas.rollback()
return
elif migration.status not in ('finished', 'confirming'):
@ -3387,7 +3383,7 @@ class ComputeManager(manager.Manager):
"of migration %(id)s, exit confirmation "
"process"),
{"status": migration.status, "id": migration_id},
-context=context, instance=instance)
+instance=instance)
quotas.rollback()
return
@ -3400,7 +3396,7 @@ class ComputeManager(manager.Manager):
expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_LI("Instance is not found during confirmation"),
-context=context, instance=instance)
+instance=instance)
quotas.rollback()
return
@ -3645,7 +3641,7 @@ class ComputeManager(manager.Manager):
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type,
image_meta=image, limits=limits) as claim:
-LOG.info(_LI('Migrating'), context=context, instance=instance)
+LOG.info(_LI('Migrating'), instance=instance)
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, quotas.reservations,
@ -4010,7 +4006,7 @@ class ComputeManager(manager.Manager):
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
-LOG.info(_LI('Pausing'), context=context, instance=instance)
+LOG.info(_LI('Pausing'), instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.PAUSE,
@ -4032,7 +4028,7 @@ class ComputeManager(manager.Manager):
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
-LOG.info(_LI('Unpausing'), context=context, instance=instance)
+LOG.info(_LI('Unpausing'), instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
@ -4069,8 +4065,7 @@ class ComputeManager(manager.Manager):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
LOG.info(_LI("Retrieving diagnostics"), instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
@ -4087,8 +4082,7 @@ class ComputeManager(manager.Manager):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
LOG.info(_LI("Retrieving diagnostics"), instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
@ -4131,7 +4125,7 @@ class ComputeManager(manager.Manager):
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
-LOG.info(_LI('Resuming'), context=context, instance=instance)
+LOG.info(_LI('Resuming'), instance=instance)
self._notify_about_instance_usage(context, instance, 'resume.start')
network_info = self.network_api.get_instance_nw_info(context, instance)
@ -4381,12 +4375,12 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
-LOG.debug('Reset network', context=context, instance=instance)
+LOG.debug('Reset network', instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
-LOG.debug('Inject network info', context=context, instance=instance)
+LOG.debug('Inject network info', instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
@ -4407,8 +4401,7 @@ class ComputeManager(manager.Manager):
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info(_LI("Get console output"), context=context,
instance=instance)
LOG.info(_LI("Get console output"), instance=instance)
output = self.driver.get_console_output(context, instance)
if type(output) is six.text_type:
@ -4677,7 +4670,7 @@ class ComputeManager(manager.Manager):
LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
-context=context, instance=instance)
+instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
@ -4687,7 +4680,7 @@ class ComputeManager(manager.Manager):
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
-context=context, instance=instance)
+instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
@ -4701,12 +4694,12 @@ class ComputeManager(manager.Manager):
LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
-context=context, instance=instance)
+instance=instance)
try:
if not self.driver.instance_exists(instance):
LOG.warning(_LW('Detaching volume from unknown instance'),
-context=context, instance=instance)
+instance=instance)
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
@ -4725,7 +4718,7 @@ class ComputeManager(manager.Manager):
LOG.exception(_LE('Failed to detach volume %(volume_id)s '
'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
-context=context, instance=instance)
+instance=instance)
self.volume_api.roll_detaching(context, volume_id)
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
@ -4872,7 +4865,7 @@ class ComputeManager(manager.Manager):
"connection infos: new: %(new_cinfo)s; "
"old: %(old_cinfo)s",
{'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo},
-contex=context, instance=instance)
+instance=instance)
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
resize_to)
except Exception:
@ -4883,14 +4876,12 @@ class ComputeManager(manager.Manager):
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
-context=context,
instance=instance)
else:
msg = _LE("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
-context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
@ -4899,7 +4890,7 @@ class ComputeManager(manager.Manager):
if new_cinfo:
LOG.debug("swap_volume: calling Cinder terminate_connection "
"for %(volume)s", {'volume': conn_volume},
-context=context, instance=instance)
+instance=instance)
self.volume_api.terminate_connection(context,
conn_volume,
connector)
@ -4912,7 +4903,7 @@ class ComputeManager(manager.Manager):
error=failed)
LOG.debug("swap_volume: Cinder migrate_volume_completion "
"returned: %(comp_ret)s", {'comp_ret': comp_ret},
-context=context, instance=instance)
+instance=instance)
return (comp_ret, new_cinfo)
@ -4935,7 +4926,7 @@ class ComputeManager(manager.Manager):
LOG.info(_LI('Swapping volume %(old_volume)s for %(new_volume)s'),
{'old_volume': old_volume_id, 'new_volume': new_volume_id},
-context=context, instance=instance)
+instance=instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
@ -4960,7 +4951,7 @@ class ComputeManager(manager.Manager):
LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with "
"%(updates)s", {'volume_id': bdm.volume_id,
'updates': values},
-context=context, instance=instance)
+instance=instance)
bdm.update(values)
bdm.save()
@ -6786,7 +6777,7 @@ class ComputeManager(manager.Manager):
migration.save()
except exception.MigrationNotFound:
LOG.warning(_LW("Migration %s is not found."),
-migration.id, context=context,
+migration.id,
instance=instance)
break

nova/hacking/checks.py

@ -103,6 +103,8 @@ spawn_re = re.compile(
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
log_remove_context = re.compile(
r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
class BaseASTChecker(ast.NodeVisitor):
@ -769,6 +771,29 @@ def no_log_warn(logical_line):
yield (0, msg)
def check_context_log(logical_line, physical_line, filename):
    """Check that a context object is not passed to logging calls.

    Not correct: LOG.info(_LI("Rebooting instance"), context=context)
    Correct:     LOG.info(_LI("Rebooting instance"))

    https://bugs.launchpad.net/nova/+bug/1500896
    N353
    """
    if "nova/tests" in filename:
        return

    if pep8.noqa(physical_line):
        return

    if log_remove_context.match(logical_line):
        yield (0,
               "N353: Nova is using oslo.context's RequestContext, "
               "which means the context object is in scope when "
               "doing logging using oslo.log, so there is no need "
               "to pass it as a kwarg.")
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
@ -808,3 +833,4 @@ def factory(register):
register(no_os_popen)
register(no_log_warn)
register(CheckForUncalledTestClosure)
register(check_context_log)
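For illustration, a standalone sketch (not part of the commit) of what
the new regex flags; the pep8.noqa() guard above means a trailing
"# noqa" comment exempts a line:

    # Standalone demo of the N353 pattern match.
    import re

    log_remove_context = re.compile(
        r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")

    flagged = "LOG.info(_LI('Rebooting instance'), context=context)"
    clean = "LOG.info(_LI('Rebooting instance'), instance=instance)"

    print(bool(log_remove_context.match(flagged)))  # True  -> N353 raised
    print(bool(log_remove_context.match(clean)))    # False -> passes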

nova/network/floating_ips.py

@ -97,7 +97,7 @@ class FloatingIP(object):
True)
LOG.debug("floating IP allocation for instance "
"|%s|", floating_address,
-instance_uuid=instance_uuid, context=context)
+instance_uuid=instance_uuid)
# get the first fixed address belonging to the instance
fixed_ips = nw_info.fixed_ips()

nova/network/manager.py

@ -273,7 +273,7 @@ class NetworkManager(manager.Manager):
network_ref = objects.Network._from_db_object(
context, objects.Network(), network_ref)
LOG.debug('Setting host %s for network %s', self.host,
-network_ref.uuid, context=context)
+network_ref.uuid)
network_ref.host = self.host
network_ref.save()
return self.host
@ -402,7 +402,7 @@ class NetworkManager(manager.Manager):
networks_list = [self._get_network_dict(network)
for network in networks]
LOG.debug('Networks retrieved for instance: |%s|',
-networks_list, context=context, instance_uuid=instance_uuid)
+networks_list, instance_uuid=instance_uuid)
try:
self._allocate_mac_addresses(admin_context, instance_uuid,
@ -425,8 +425,7 @@ class NetworkManager(manager.Manager):
net_info = self.get_instance_nw_info(admin_context, instance_uuid,
rxtx_factor, host)
LOG.info(_LI("Allocated network: '%s' for instance"), net_info,
-instance_uuid=instance_uuid,
-context=context)
+instance_uuid=instance_uuid)
return net_info
def deallocate_for_instance(self, context, **kwargs):
@ -475,7 +474,7 @@ class NetworkManager(manager.Manager):
network_ids = set([])
fixed_ips = []
LOG.debug("Network deallocation for instance",
-context=context, instance_uuid=instance_uuid)
+instance_uuid=instance_uuid)
# deallocate fixed ips
for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip, host=host,
@ -488,7 +487,7 @@ class NetworkManager(manager.Manager):
objects.VirtualInterface.delete_by_instance_uuid(
read_deleted_context, instance_uuid)
LOG.info(_LI("Network deallocated for instance (fixed IPs: '%s')"),
-fixed_ips, context=context, instance_uuid=instance_uuid)
+fixed_ips, instance_uuid=instance_uuid)
@messaging.expected_exceptions(exception.InstanceNotFound)
def get_instance_nw_info(self, context, instance_id, rxtx_factor,
@ -1029,31 +1028,29 @@ class NetworkManager(manager.Manager):
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when IP is leased."""
-LOG.debug('Leased IP |%s|', address, context=context)
+LOG.debug('Leased IP |%s|', address)
fixed_ip = objects.FixedIP.get_by_address(context, address)
if fixed_ip.instance_uuid is None:
-LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip,
-context=context)
+LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip)
return
fixed_ip.leased = True
fixed_ip.save()
if not fixed_ip.allocated:
LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), fixed_ip,
-context=context, instance_uuid=fixed_ip.instance_uuid)
+instance_uuid=fixed_ip.instance_uuid)
def release_fixed_ip(self, context, address, mac=None):
"""Called by dhcp-bridge when IP is released."""
-LOG.debug('Released IP |%s|', address, context=context)
+LOG.debug('Released IP |%s|', address)
fixed_ip = objects.FixedIP.get_by_address(context, address)
if fixed_ip.instance_uuid is None:
-LOG.warning(_LW('IP %s released that is not associated'), fixed_ip,
-context=context)
+LOG.warning(_LW('IP %s released that is not associated'), fixed_ip)
return
if not fixed_ip.leased:
LOG.warning(_LW('IP %s released that was not leased'), fixed_ip,
-context=context, instance_uuid=fixed_ip.instance_uuid)
+instance_uuid=fixed_ip.instance_uuid)
else:
fixed_ip.leased = False
fixed_ip.save()

nova/tests/unit/test_hacking.py

@ -770,3 +770,24 @@ class HackingTestCase(test.NoDBTestCase):
self.assertEqual(1, len(list(func('for i in xrange (10)'))))
self.assertEqual(0, len(list(func('for i in range(10)'))))
self.assertEqual(0, len(list(func('for i in six.moves.range(10)'))))
    def test_log_context(self):
        code = """
                  LOG.info(_LI("Rebooting instance"),
                           context=context, instance=instance)
               """
        errors = [(1, 0, 'N353')]
        self._assert_has_errors(code, checks.check_context_log,
                                expected_errors=errors)
        code = """
                  LOG.info(_LI("Rebooting instance"),
                           context=admin_context, instance=instance)
               """
        errors = [(1, 0, 'N353')]
        self._assert_has_errors(code, checks.check_context_log,
                                expected_errors=errors)
        code = """
                  LOG.info(_LI("Rebooting instance"),
                           instance=instance)
               """
        self._assert_has_no_errors(code, checks.check_context_log)

nova/virt/block_device.py

@ -278,7 +278,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
"%(volume_id)s at %(mountpoint)s"),
{'volume_id': volume_id,
'mountpoint': self['mount_device']},
-context=context, instance=instance)
+instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
self['connection_info'] = connection_info
@ -310,8 +310,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
"%(volume_id)s at %(mount_point)s."),
{'volume_id': volume_id,
'mount_point': self['mount_device']},
-exc_info=True, context=context,
-instance=instance)
+exc_info=True, instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
@ -484,30 +483,29 @@ def convert_volume(volume_bdm):
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
-context = attach_args[0]
instance = attach_args[1]
if bdm.get('volume_id'):
LOG.info(_LI('Booting with volume %(volume_id)s at '
'%(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
-context=context, instance=instance)
+instance=instance)
elif bdm.get('snapshot_id'):
LOG.info(_LI('Booting with volume snapshot %(snapshot_id)s at '
'%(mountpoint)s'),
{'snapshot_id': bdm.snapshot_id,
'mountpoint': bdm['mount_device']},
-context=context, instance=instance)
+instance=instance)
elif bdm.get('image_id'):
LOG.info(_LI('Booting with volume-backed-image %(image_id)s at '
'%(mountpoint)s'),
{'image_id': bdm.image_id,
'mountpoint': bdm['mount_device']},
-context=context, instance=instance)
+instance=instance)
else:
LOG.info(_LI('Booting with blank volume at %(mountpoint)s'),
{'mountpoint': bdm['mount_device']},
-context=context, instance=instance)
+instance=instance)
bdm.attach(*attach_args, **attach_kwargs)