Use already loaded BDM in instance.<action>

In I18e7483ec9a484a660e1d306fdc0986e1d5f952b BDM was added to the instance
notifications. In general, adding the BDM to the payload requires an extra
DB query. However, in some places the BDM is already loaded separately
before notify_about_instance_action is called to send the notification.
In these cases loading the BDM again is unnecessary, as the already
loaded BDM can be reused.

This patch makes sure that notify_about_instance_action is called with the
already loaded BDM. There will be subsequent patches to do the same with
other notify calls.

Change-Id: I391554d3904a5a60b921ef4714a1cfd0a64a25c2
Related-Bug: #1718226
Balazs Gibizer 2017-07-13 14:06:13 +02:00
parent 664322cae7
commit c4fadfd4d2
8 changed files with 172 additions and 81 deletions
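
In outline, the pattern applied across the diffs below is: load the
BlockDeviceMappingList once, use it for the work that needs it anyway, and
hand the same object to the notification helper. A minimal, self-contained
sketch of the before/after behaviour; load_bdms and notify are placeholders,
not Nova's API (in Nova the helper is compute_utils.notify_about_instance_action
and the query is objects.BlockDeviceMappingList.get_by_instance_uuid):

def load_bdms(instance_uuid):
    # Placeholder for the BlockDeviceMappingList DB query.
    print("DB query for", instance_uuid)
    return [{"volume_id": "vol-1"}]

def notify(instance_uuid, action, phase, bdms=None):
    # Placeholder for the notification helper; when bdms is None it has to
    # load the mappings itself, which is the extra DB query described above.
    bdms = bdms if bdms is not None else load_bdms(instance_uuid)
    print("%s.%s:" % (action, phase), [b["volume_id"] for b in bdms])

uuid = "f8000000-0000-0000-0000-000000000000"
bdms = load_bdms(uuid)                      # already needed by the caller
notify(uuid, "delete", "start")             # before: triggers a second query
notify(uuid, "delete", "start", bdms=bdms)  # after: reuses the loaded list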

View File

@@ -4,7 +4,18 @@
"nova_object.data":{
"architecture":"x86_64",
"availability_zone": "nova",
"block_devices":[],
"block_devices":[{
"nova_object.data": {
"boot_index": null,
"delete_on_termination": false,
"device_name": "/dev/sdb",
"tag": null,
"volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
},
"nova_object.name": "BlockDevicePayload",
"nova_object.namespace": "nova",
"nova_object.version": "1.0"
}],
"created_at":"2012-10-29T13:42:11Z",
"deleted_at":"2012-10-29T13:42:11Z",
"display_name":"some-server",

View File

@@ -731,7 +731,7 @@ class ComputeManager(manager.Manager):
system_metadata=system_meta)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END, bdms=bdms)
self._delete_scheduler_instance_info(context, instance.uuid)
def _init_instance(self, context, instance):
@@ -2254,7 +2254,7 @@ class ComputeManager(manager.Manager):
"shutdown.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHUTDOWN,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START, bdms=bdms)
network_info = instance.get_network_info()
@@ -2341,7 +2341,7 @@ class ComputeManager(manager.Manager):
"shutdown.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHUTDOWN,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END, bdms=bdms)
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
exc_info = None
@@ -2377,7 +2377,7 @@ class ComputeManager(manager.Manager):
"delete.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START, bdms=bdms)
self._shutdown_instance(context, instance, bdms)
# NOTE(dims): instance.info_cache.delete() should be called after
@@ -2688,13 +2688,13 @@ class ComputeManager(manager.Manager):
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
def _notify_instance_rebuild_error(self, context, instance, error):
def _notify_instance_rebuild_error(self, context, instance, error, bdms):
self._notify_about_instance_usage(context, instance,
'rebuild.error', fault=error)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.ERROR, exception=error)
phase=fields.NotificationPhase.ERROR, exception=error, bdms=bdms)
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@@ -2801,8 +2801,7 @@ class ComputeManager(manager.Manager):
# not raise ComputeResourcesUnavailable.
rt.delete_allocation_for_evacuated_instance(
instance, scheduled_node, node_type='destination')
self._notify_instance_rebuild_error(context, instance, e)
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=e.format_message())
except (exception.InstanceNotFound,
@@ -2810,13 +2809,13 @@ class ComputeManager(manager.Manager):
LOG.debug('Instance was deleted while rebuilding',
instance=instance)
self._set_migration_status(migration, 'failed')
self._notify_instance_rebuild_error(context, instance, e)
self._notify_instance_rebuild_error(context, instance, e, bdms)
except Exception as e:
self._set_migration_status(migration, 'failed')
if recreate or scheduled_node is not None:
rt.delete_allocation_for_evacuated_instance(
instance, scheduled_node, node_type='destination')
self._notify_instance_rebuild_error(context, instance, e)
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise
else:
instance.apply_migration_context()
@@ -2907,7 +2906,8 @@ class ComputeManager(manager.Manager):
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START,
bdms=bdms)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
@@ -2979,7 +2979,8 @@ class ComputeManager(manager.Manager):
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END,
bdms=bdms)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
@@ -4000,12 +4001,13 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.START)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.START, bdms=bdms)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
@@ -4042,7 +4044,7 @@ class ComputeManager(manager.Manager):
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END, bdms=bdms)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
@@ -4399,12 +4401,21 @@ class ComputeManager(manager.Manager):
def _shelve_instance(self, context, instance, image_id,
clean_shutdown):
LOG.info('Shelving', instance=instance)
offload = CONF.shelved_offload_time == 0
if offload:
# Get the BDMs early so we can pass them into versioned
# notifications since _shelve_offload_instance needs the
# BDMs anyway.
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
else:
bdms = None
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START, bdms=bdms)
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
@@ -4436,11 +4447,11 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage(context, instance, 'shelve.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END, bdms=bdms)
if CONF.shelved_offload_time == 0:
if offload:
self._shelve_offload_instance(context, instance,
clean_shutdown=False)
clean_shutdown=False, bdms=bdms)
@wrap_exception()
@reverts_task_state
@@ -4463,13 +4474,17 @@ class ComputeManager(manager.Manager):
self._shelve_offload_instance(context, instance, clean_shutdown)
do_shelve_offload_instance()
def _shelve_offload_instance(self, context, instance, clean_shutdown):
def _shelve_offload_instance(self, context, instance, clean_shutdown,
bdms=None):
LOG.info('Shelve offloading', instance=instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START, bdms=bdms)
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
@@ -4477,8 +4492,6 @@ class ComputeManager(manager.Manager):
self.network_api.cleanup_instance_network_on_host(context, instance,
instance.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(context,
instance,
@@ -4516,7 +4529,7 @@ class ComputeManager(manager.Manager):
'shelve_offload.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END, bdms=bdms)
@wrap_exception()
@reverts_task_state
@@ -4558,16 +4571,17 @@ class ComputeManager(manager.Manager):
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
LOG.info('Unshelving', instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._notify_about_instance_usage(context, instance, 'unshelve.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START, bdms=bdms)
instance.task_state = task_states.SPAWNING
instance.save()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
@@ -4624,7 +4638,7 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage(context, instance, 'unshelve.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END, bdms=bdms)
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
@@ -6011,7 +6025,8 @@ class ComputeManager(manager.Manager):
compute_utils.notify_about_instance_action(context, instance,
self.host,
action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK,
phase=fields.NotificationPhase.START)
phase=fields.NotificationPhase.START,
bdms=bdms)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
@@ -6024,9 +6039,11 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
compute_utils.notify_about_instance_action(context, instance,
self.host,
action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK,
phase=fields.NotificationPhase.END)
phase=fields.NotificationPhase.END,
bdms=bdms)
self._set_migration_status(migration, migration_status)
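
The resize hunks above are representative of the whole file: the BDM list is
now fetched once, before the start-phase notification, and the same object
then feeds the notification payload, the block-device-info construction, and
the end-phase notification. A condensed sketch of that ordering, with
placeholder functions rather than Nova's real signatures:

def get_bdms(uuid):
    # Placeholder for the single BlockDeviceMappingList query.
    return ["bdm"]

def notify(action, phase, bdms):
    print("%s.%s with %d block device(s)" % (action, phase, len(bdms)))

def get_block_device_info(bdms):
    return {"block_device_mapping": bdms}

def resize_instance(uuid):
    bdms = get_bdms(uuid)               # fetched once, before the start event
    notify("resize", "start", bdms)     # payload built from the same list
    info = get_block_device_info(bdms)  # reused instead of re-queried
    # ... driver work using info ...
    notify("resize", "end", bdms)       # reused again for the end phase

resize_instance("some-uuid")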

View File

@@ -352,7 +352,7 @@ def _get_fault_and_priority_from_exc(exception):
@rpc.if_notifications_enabled
def notify_about_instance_action(context, instance, host, action, phase=None,
source=fields.NotificationSource.COMPUTE,
exception=None):
exception=None, bdms=None):
"""Send versioned notification about the action made on the instance
:param instance: the instance which the action performed on
:param host: the host emitting the notification
@@ -360,11 +360,14 @@ def notify_about_instance_action(context, instance, host, action, phase=None,
:param phase: the phase of the action
:param source: the source of the notification
:param exception: the thrown exception (used in error notifications)
:param bdms: BlockDeviceMappingList object for the instance. If it is not
provided then we will load it from the db if so configured
"""
fault, priority = _get_fault_and_priority_from_exc(exception)
payload = instance_notification.InstanceActionPayload(
instance=instance,
fault=fault)
fault=fault,
bdms=bdms)
notification = instance_notification.InstanceActionNotification(
context=context,
priority=priority,
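
The helper itself only threads the new keyword through to the payload object;
a None bdms simply defers the load decision to the payload. A toy mirror of
that plumbing (the Fake* names are hypothetical stand-ins, plain classes
rather than Nova's versioned objects):

class FakeInstanceActionPayload(object):
    # Stand-in for InstanceActionPayload: the real class builds its
    # block_devices from the passed bdms when given, and falls back to a
    # DB-backed lookup via the instance otherwise.
    def __init__(self, instance, fault=None, bdms=None):
        self.block_devices = (bdms if bdms is not None
                              else instance["bdms_from_db"])

def fake_notify_about_instance_action(context, instance, host, action,
                                      phase=None, exception=None, bdms=None):
    payload = FakeInstanceActionPayload(instance, fault=None, bdms=bdms)
    print(action, phase, payload.block_devices)

inst = {"bdms_from_db": ["queried-bdm"]}
fake_notify_about_instance_action(None, inst, "host1", "shelve",
                                  phase="start", bdms=["cached-bdm"])
fake_notify_about_instance_action(None, inst, "host1", "shelve",
                                  phase="start")  # falls back to the DB path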

View File

@@ -108,14 +108,15 @@ class InstancePayload(base.NotificationPayloadBase):
'auto_disk_config': fields.DiskConfigField()
}
def __init__(self, instance):
def __init__(self, instance, bdms=None):
super(InstancePayload, self).__init__()
network_info = instance.get_network_info()
self.ip_addresses = IpPayload.from_network_info(network_info)
self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
# TODO(gibi): investigate the possibility to use already in scope bdm
# when available like in instance.create
self.block_devices = BlockDevicePayload.from_instance(instance)
if bdms is not None:
self.block_devices = BlockDevicePayload.from_bdms(bdms)
else:
self.block_devices = BlockDevicePayload.from_instance(instance)
self.populate_schema(instance=instance)
@@ -134,8 +135,9 @@ class InstanceActionPayload(InstancePayload):
'fault': fields.ObjectField('ExceptionPayload', nullable=True),
}
def __init__(self, instance, fault):
super(InstanceActionPayload, self).__init__(instance=instance)
def __init__(self, instance, fault, bdms=None):
super(InstanceActionPayload, self).__init__(instance=instance,
bdms=bdms)
self.fault = fault
@@ -352,12 +354,21 @@ class BlockDevicePayload(base.NotificationPayloadBase):
return None
instance_bdms = instance.get_bdms()
bdms = []
if instance_bdms is not None:
for bdm in instance_bdms:
if bdm.volume_id is not None:
bdms.append(cls(bdm))
return bdms
return cls.from_bdms(instance_bdms)
else:
return []
@classmethod
def from_bdms(cls, bdms):
"""Returns a list of BlockDevicePayload objects based on the passed
BlockDeviceMappingList.
"""
payloads = []
for bdm in bdms:
if bdm.volume_id is not None:
payloads.append(cls(bdm))
return payloads
@nova_base.NovaObjectRegistry.register_notification
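
Note that from_bdms keeps only volume-backed mappings, preserving the filter
that from_instance applied before this change. A standalone sketch of that
filtering logic, using plain dicts instead of BlockDeviceMapping objects:

def from_bdms(bdms):
    # Mirror of the classmethod above: one payload per mapping that has a
    # volume_id; image-backed and blank mappings are skipped.
    return [{"volume_id": b["volume_id"], "device_name": b["device_name"]}
            for b in bdms if b["volume_id"] is not None]

bdms = [
    {"volume_id": "a07f71dc", "device_name": "/dev/sdb"},
    {"volume_id": None, "device_name": "/dev/vda"},  # local disk, skipped
]
print(from_bdms(bdms))  # only the /dev/sdb entry survives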

View File

@@ -5235,9 +5235,9 @@ class ComputeTestCase(BaseTestCase,
clean_shutdown=clean_shutdown)
mock_notify_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='resize', phase='start'),
action='resize', phase='start', bdms='fake_bdms'),
mock.call(self.context, instance, 'fake-mini',
action='resize', phase='end')])
action='resize', phase='end', bdms='fake_bdms')])
mock_get_instance_vol_bdinfo.assert_called_once_with(
self.context, instance, bdms='fake_bdms')
mock_terminate_vol_conn.assert_called_once_with(self.context,
@@ -6283,7 +6283,8 @@ class ComputeTestCase(BaseTestCase,
dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
mock_get_node.return_value = dest_node
mock_bdms.return_value = objects.BlockDeviceMappingList()
bdms = objects.BlockDeviceMappingList()
mock_bdms.return_value = bdms
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(self.compute, '_live_migration_cleanup_flags')
@@ -6297,9 +6298,11 @@ class ComputeTestCase(BaseTestCase,
instance.project_id, test.MatchType(dict))
mock_notify.assert_has_calls([
mock.call(c, instance, self.compute.host,
action='live_migration_rollback', phase='start'),
action='live_migration_rollback', phase='start',
bdms=bdms),
mock.call(c, instance, self.compute.host,
action='live_migration_rollback', phase='end')])
action='live_migration_rollback', phase='end',
bdms=bdms)])
mock_nw_api.setup_networks_on_host.assert_called_once_with(
c, instance, self.compute.host)
_test()
@@ -6323,7 +6326,8 @@ class ComputeTestCase(BaseTestCase,
dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
mock_get_node.return_value = dest_node
mock_bdms.return_value = objects.BlockDeviceMappingList()
bdms = objects.BlockDeviceMappingList()
mock_bdms.return_value = bdms
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(self.compute, '_live_migration_cleanup_flags')
@@ -6338,9 +6342,11 @@ class ComputeTestCase(BaseTestCase,
instance.project_id, test.MatchType(dict))
mock_notify.assert_has_calls([
mock.call(c, instance, self.compute.host,
action='live_migration_rollback', phase='start'),
action='live_migration_rollback', phase='start',
bdms=bdms),
mock.call(c, instance, self.compute.host,
action='live_migration_rollback', phase='end')])
action='live_migration_rollback', phase='end',
bdms=bdms)])
mock_nw_api.setup_networks_on_host.assert_called_once_with(
c, instance, self.compute.host)
_test()
@@ -12002,19 +12008,19 @@ class EvacuateHostTestCase(BaseTestCase):
if vm_states_is_stopped:
mock_notify.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host,
action='rebuild', phase='start'),
action='rebuild', phase='start', bdms=bdms),
mock.call(ctxt, self.inst, self.inst.host,
action='power_off', phase='start'),
mock.call(ctxt, self.inst, self.inst.host,
action='power_off', phase='end'),
mock.call(ctxt, self.inst, self.inst.host,
action='rebuild', phase='end')])
action='rebuild', phase='end', bdms=bdms)])
else:
mock_notify.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host,
action='rebuild', phase='start'),
action='rebuild', phase='start', bdms=bdms),
mock.call(ctxt, self.inst, self.inst.host,
action='rebuild', phase='end')])
action='rebuild', phase='end', bdms=bdms)])
mock_setup_networks_on_host.assert_called_once_with(
ctxt, self.inst, self.inst.host)

View File

@@ -155,10 +155,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
specd_compute._shutdown_instance = _mark_shutdown
mock_inst.info_cache = call_tracker
mock_bdms = mock.Mock()
specd_compute._delete_instance(specd_compute,
self.context,
mock_inst,
mock.Mock())
mock_bdms)
methods_called = [n for n, a, k in call_tracker.mock_calls]
self.assertEqual(['clear_events_for_instance',
@@ -169,7 +170,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_inst,
specd_compute.host,
action='delete',
phase='start')
phase='start',
bdms=mock_bdms)
def _make_compute_node(self, hyp_hostname, cn_id):
cn = mock.Mock(spec_set=['hypervisor_hostname', 'id',
@@ -296,9 +298,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='delete', phase='start'),
action='delete', phase='start', bdms=[]),
mock.call(self.context, instance, 'fake-mini',
action='delete', phase='end')])
action='delete', phase='end', bdms=[])])
def test_check_device_tagging_no_tagging(self):
bdms = objects.BlockDeviceMappingList(objects=[
@@ -1207,9 +1209,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
notify=True, try_deallocate_networks=False)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shutdown', phase='start'),
action='shutdown', phase='start', bdms=bdms),
mock.call(self.context, instance, 'fake-mini',
action='shutdown', phase='end')])
action='shutdown', phase='end', bdms=bdms)])
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.Instance.get_network_info')
@@ -3595,7 +3597,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
)
mock_notify.assert_called_once_with(
mock.ANY, instance, 'fake-mini', action='rebuild', phase='error',
exception=exc)
exception=exc, bdms=None)
def test_rebuild_deleting(self):
instance = fake_instance.fake_instance_obj(self.context)
@@ -6088,7 +6090,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
def _test(mock_bdm, mock_lmcf, mock_notify, mock_nwapi,
mock_notify_about_instance_action):
mock_bdm.return_value = objects.BlockDeviceMappingList()
bdms = objects.BlockDeviceMappingList()
mock_bdm.return_value = bdms
mock_lmcf.return_value = False, False
mock_instance = mock.MagicMock()
compute._rollback_live_migration(self.context,
@@ -6099,9 +6102,11 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock_instance.project_id, test.MatchType(dict))
mock_notify_about_instance_action.assert_has_calls([
mock.call(self.context, mock_instance, compute.host,
action='live_migration_rollback', phase='start'),
action='live_migration_rollback', phase='start',
bdms=bdms),
mock.call(self.context, mock_instance, compute.host,
action='live_migration_rollback', phase='end')])
action='live_migration_rollback', phase='end',
bdms=bdms)])
self.assertIsInstance(mock_lmcf.call_args_list[0][0][0],
migrate_data_obj.LiveMigrateData)

View File

@@ -491,13 +491,23 @@ class UsageInfoTestCase(test.TestCase):
def test_notify_about_instance_action(self):
instance = create_instance(self.context)
bdms = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume',
'device_name': '/dev/vda',
'instance_uuid': 'f8000000-0000-0000-0000-000000000000',
'destination_type': 'volume',
'boot_index': 0,
'volume_id': 'de8836ac-d75e-11e2-8271-5254009297d6'})])
compute_utils.notify_about_instance_action(
self.context,
instance,
host='fake-compute',
action='delete',
phase='start')
phase='start',
bdms=bdms)
self.assertEqual(len(fake_notifier.VERSIONED_NOTIFICATIONS), 1)
notification = fake_notifier.VERSIONED_NOTIFICATIONS[0]
@@ -522,6 +532,15 @@ class UsageInfoTestCase(test.TestCase):
self.assertIn(attr, payload, "Key %s not in payload" % attr)
self.assertEqual(payload['image_uuid'], uuids.fake_image_ref)
self.assertEqual(1, len(payload['block_devices']))
payload_bdm = payload['block_devices'][0]['nova_object.data']
self.assertEqual(
{'boot_index': 0,
'delete_on_termination': False,
'device_name': '/dev/vda',
'tag': None,
'volume_id': 'de8836ac-d75e-11e2-8271-5254009297d6'},
payload_bdm)
def test_notify_about_instance_create(self):
keypair = objects.KeyPair(name='my-key', user_id='fake', type='ssh',

View File

@@ -47,6 +47,7 @@ def _fake_resources():
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
@@ -58,7 +59,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_instance(self, shelved_offload_time, mock_notify,
mock_notify_instance_usage, mock_get_power_state,
mock_snapshot, mock_power_off, mock_terminate,
clean_shutdown=True):
mock_get_bdms, clean_shutdown=True):
mock_get_power_state.return_value = 123
CONF.set_override('shelved_offload_time', shelved_offload_time)
@@ -70,6 +71,11 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance.task_state = task_states.SHELVING
instance.save()
fake_bdms = None
if shelved_offload_time == 0:
fake_bdms = objects.BlockDeviceMappingList()
mock_get_bdms.return_value = fake_bdms
tracking = {'last_state': instance.vm_state}
def check_save(expected_task_state=None):
@@ -119,9 +125,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
clean_shutdown=clean_shutdown)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve', phase='start'),
action='shelve', phase='start', bdms=fake_bdms),
mock.call(self.context, instance, 'fake-mini',
action='shelve', phase='end')])
action='shelve', phase='end', bdms=fake_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
@@ -184,6 +190,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -197,22 +204,26 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.compute.utils.notify_about_instance_action')
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate,
mock_delete_alloc, mock_terminate, mock_get_bdms,
clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
instance.save()
self.useFixture(utils_fixture.TimeFixture())
fake_bdms = objects.BlockDeviceMappingList()
mock_get_bdms.return_value = fake_bdms
with mock.patch.object(instance, 'save'):
self.compute.shelve_offload_instance(self.context, instance,
clean_shutdown=clean_shutdown)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='start'),
action='shelve_offload', phase='start',
bdms=fake_bdms),
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='end')])
action='shelve_offload', phase='end',
bdms=fake_bdms)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
@@ -236,6 +247,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
return instance
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@@ -248,7 +260,10 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def test_unshelve(self, mock_setup_network,
mock_get_power_state, mock_spawn,
mock_prep_block_device, mock_notify_instance_usage,
mock_notify_instance_action):
mock_notify_instance_action,
mock_get_bdms):
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
instance.task_state = task_states.UNSHELVING
instance.save()
@@ -312,9 +327,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='start'),
action='unshelve', phase='start', bdms=mock_bdms),
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='end')])
action='unshelve', phase='end', bdms=mock_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
@@ -346,6 +361,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertEqual(self.compute.host, instance.host)
self.assertFalse(instance.auto_disk_config)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
'instance_claim')
@@ -363,7 +379,10 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mock_prep_block_device, mock_spawn,
mock_get_power_state,
mock_setup_network, mock_instance_claim,
mock_notify_instance_action):
mock_notify_instance_action,
mock_get_bdms):
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
@@ -405,9 +424,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='start'),
action='unshelve', phase='start', bdms=mock_bdms),
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='end')])
action='unshelve', phase='end', bdms=mock_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [