Merge "Update live migration to use v3 cinder api"

Zuul 2017-10-19 20:08:21 +00:00 committed by Gerrit Code Review
commit af4703cb38
7 changed files with 478 additions and 36 deletions

View File

@ -5443,19 +5443,22 @@ class ComputeManager(manager.Manager):
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
"""Remove the volume connection on this host
Detach the volume from this instance on this host, and if this is
the cinder v2 flow, call cinder to terminate the connection.
"""
try:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
driver_bdm = driver_block_device.convert_volume(bdm)
driver_bdm.driver_detach(context, instance,
self.volume_api, self.driver)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
if bdm.attachment_id is None:
# cinder v2 api flow
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id,
connector)
except exception.NotFound:
pass
@ -5684,8 +5687,53 @@ class ComputeManager(manager.Manager):
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
migrate_data.old_vol_attachment_ids = {}
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id is not None:
# This bdm uses the new cinder v3.44 API.
# We will create a new attachment for this
# volume on this migration destination host. The old
# attachment will be deleted on the source host
# when the migration succeeds. The old attachment_id
# is stored in a dict with the key being the bdm.volume_id
# so it can be restored on rollback.
#
# Also note that attachment_update is not needed as we
# are providing the connector in the create call.
attach_ref = self.volume_api.attachment_create(
context, bdm.volume_id, bdm.instance_uuid,
connector=connector)
# save the current attachment so we can delete it on success,
# or restore it on a rollback.
migrate_data.old_vol_attachment_ids[bdm.volume_id] = \
bdm.attachment_id
# update the bdm with the new attachment_id.
bdm.attachment_id = attach_ref['id']
bdm.save()
except Exception:
# If we raise, migrate_data with the updated attachment ids
# will not be returned to the source host for rollback.
# So we need to roll back the new attachments here.
with excutils.save_and_reraise_exception():
old_attachments = migrate_data.old_vol_attachment_ids
for bdm in bdms:
if (bdm.is_volume and bdm.attachment_id is not None and
bdm.volume_id in old_attachments):
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = old_attachments[bdm.volume_id]
bdm.save()
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
context, instance, refresh_conn_info=True,
bdms=bdms)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
@ -5700,6 +5748,13 @@ class ComputeManager(manager.Manager):
migrate_data)
LOG.debug('driver pre_live_migration data is %s', migrate_data)
# Volume connections are complete, tell cinder that all the
# attachments have completed.
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id is not None:
self.volume_api.attachment_complete(context,
bdm.attachment_id)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
@ -5928,15 +5983,24 @@ class ComputeManager(manager.Manager):
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
if bdm.attachment_id is None:
# Prior to cinder v3.44:
# We don't want to actually mark the volume detached, or
# delete the bdm, just remove the connection from this
# host.
#
# remove the volume connection without detaching from
# hypervisor because the instance is not running anymore
# on the current host
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
else:
# cinder v3.44 api flow - delete the old attachment
# for the source host
old_attachment_id = \
migrate_data.old_vol_attachment_ids[bdm.volume_id]
self.volume_api.attachment_delete(ctxt, old_attachment_id)
# Releasing vlan.
# (not necessary in current implementation?)
@ -6151,9 +6215,23 @@ class ComputeManager(manager.Manager):
context, instance.uuid)
for bdm in bdms:
if bdm.is_volume:
# remove the connection on the destination host
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
if bdm.attachment_id:
# 3.44 cinder api flow. Set the bdm's
# attachment_id to the old attachment of the source
# host. If old_attachments is not there, then
# there was an error before the new attachment was made.
old_attachments = migrate_data.old_vol_attachment_ids \
if 'old_vol_attachment_ids' in migrate_data else None
if old_attachments and bdm.volume_id in old_attachments:
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = old_attachments[bdm.volume_id]
bdm.save()
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
compute_utils.notify_about_instance_action(context, instance,

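Taken together, the compute-manager changes above implement the Cinder 3.44 attachment hand-off: pre_live_migration creates a new attachment for the destination host and records the old attachment id per volume, the new attachments are marked complete once the driver has finished its connection setup, _post_live_migration deletes the old (source) attachments on success, and _rollback_live_migration deletes the new attachments and restores the old ids. Below is a minimal standalone sketch of that lifecycle; VolumeAPI and Bdm are hypothetical stand-ins for Nova's volume API and BlockDeviceMapping objects, not the real classes.

# Sketch of the Cinder v3.44 attachment hand-off used above.
# VolumeAPI and Bdm are illustrative stand-ins, not Nova's objects.
from dataclasses import dataclass, field
from uuid import uuid4


@dataclass
class Bdm:
    volume_id: str
    attachment_id: str


@dataclass
class VolumeAPI:
    attachments: dict = field(default_factory=dict)

    def attachment_create(self, volume_id, instance_uuid, connector=None):
        attachment_id = str(uuid4())
        self.attachments[attachment_id] = {'volume_id': volume_id,
                                           'connector': connector}
        return {'id': attachment_id}

    def attachment_complete(self, attachment_id):
        self.attachments[attachment_id]['complete'] = True

    def attachment_delete(self, attachment_id):
        self.attachments.pop(attachment_id, None)


def pre_live_migration(volume_api, bdms, instance_uuid, connector,
                       old_vol_attachment_ids):
    # Create a destination-host attachment per volume, remembering the
    # old (source) attachment id so it can be restored on rollback.
    for bdm in bdms:
        ref = volume_api.attachment_create(bdm.volume_id, instance_uuid,
                                           connector=connector)
        old_vol_attachment_ids[bdm.volume_id] = bdm.attachment_id
        bdm.attachment_id = ref['id']
    # Once the driver has finished its connection setup, mark the new
    # attachments complete (in the real flow this happens after the
    # driver's pre_live_migration work).
    for bdm in bdms:
        volume_api.attachment_complete(bdm.attachment_id)


def post_live_migration(volume_api, bdms, old_vol_attachment_ids):
    # Migration succeeded: drop the old source-host attachments.
    for bdm in bdms:
        volume_api.attachment_delete(old_vol_attachment_ids[bdm.volume_id])


def rollback_live_migration(volume_api, bdms, old_vol_attachment_ids):
    # Migration failed: drop the new attachments and restore the old ids.
    for bdm in bdms:
        volume_api.attachment_delete(bdm.attachment_id)
        bdm.attachment_id = old_vol_attachment_ids[bdm.volume_id]

Only one of the last two steps ever runs for a given migration, which is why the old ids must be captured in migrate_data before the bdm is updated.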
View File

@ -29,6 +29,10 @@ class LiveMigrateData(obj_base.NovaObject):
fields = {
'is_volume_backed': fields.BooleanField(),
'migration': fields.ObjectField('Migration'),
# old_vol_attachment_ids is a dict used to store the old attachment_ids
# for each volume so they can be restored on a migration rollback. The
# key is the volume_id, and the value is the attachment_id.
'old_vol_attachment_ids': fields.DictOfStringsField(),
}
def to_legacy_dict(self, pre_migration_result=False):
@ -110,7 +114,8 @@ class LibvirtLiveMigrateData(LiveMigrateData):
# Version 1.2: Added 'serial_listen_ports' to allow live migration with
# serial console.
# Version 1.3: Added 'supported_perf_events'
VERSION = '1.3'
# Version 1.4: Added old_vol_attachment_ids
VERSION = '1.4'
fields = {
'filename': fields.StringField(),
@ -135,6 +140,9 @@ class LibvirtLiveMigrateData(LiveMigrateData):
super(LibvirtLiveMigrateData, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 4):
if 'old_vol_attachment_ids' in primitive:
del primitive['old_vol_attachment_ids']
if target_version < (1, 3):
if 'supported_perf_events' in primitive:
del primitive['supported_perf_events']
@ -223,7 +231,8 @@ class LibvirtLiveMigrateData(LiveMigrateData):
class XenapiLiveMigrateData(LiveMigrateData):
# Version 1.0: Initial version
# Version 1.1: Added vif_uuid_map
VERSION = '1.1'
# Version 1.2: Added old_vol_attachment_ids
VERSION = '1.2'
fields = {
'block_migration': fields.BooleanField(nullable=True),
@ -275,6 +284,9 @@ class XenapiLiveMigrateData(LiveMigrateData):
super(XenapiLiveMigrateData, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'old_vol_attachment_ids' in primitive:
del primitive['old_vol_attachment_ids']
if target_version < (1, 1):
if 'vif_uuid_map' in primitive:
del primitive['vif_uuid_map']
@ -284,7 +296,8 @@ class XenapiLiveMigrateData(LiveMigrateData):
class HyperVLiveMigrateData(LiveMigrateData):
# Version 1.0: Initial version
# Version 1.1: Added is_shared_instance_path
VERSION = '1.1'
# Version 1.2: Added old_vol_attachment_ids
VERSION = '1.2'
fields = {'is_shared_instance_path': fields.BooleanField()}
@ -292,6 +305,9 @@ class HyperVLiveMigrateData(LiveMigrateData):
super(HyperVLiveMigrateData, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'old_vol_attachment_ids' in primitive:
del primitive['old_vol_attachment_ids']
if target_version < (1, 1):
if 'is_shared_instance_path' in primitive:
del primitive['is_shared_instance_path']
@ -313,7 +329,8 @@ class HyperVLiveMigrateData(LiveMigrateData):
class PowerVMLiveMigrateData(LiveMigrateData):
# Version 1.0: Initial version
# Version 1.1: Added the Virtual Ethernet Adapter VLAN mappings.
VERSION = '1.1'
# Version 1.2: Added old_vol_attachment_ids
VERSION = '1.2'
fields = {
'host_mig_data': fields.DictOfNullableStringsField(),
@ -330,6 +347,9 @@ class PowerVMLiveMigrateData(LiveMigrateData):
super(PowerVMLiveMigrateData, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'old_vol_attachment_ids' in primitive:
del primitive['old_vol_attachment_ids']
if target_version < (1, 1):
if 'vea_vlan_mappings' in primitive:
del primitive['vea_vlan_mappings']
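Every migrate-data object above follows the same versioned-object pattern: add the new field, bump VERSION, and strip the field in obj_make_compatible when serializing for a peer that only understands an older version. Here is a hedged sketch of that pattern using oslo.versionedobjects directly; the ExampleMigrateData class and its 1.0/1.1 history are illustrative, not part of Nova.

# Sketch of the version-bump pattern used by the LiveMigrateData
# subclasses above; the class and its version history are illustrative.
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields


@ovo_base.VersionedObjectRegistry.register
class ExampleMigrateData(ovo_base.VersionedObject):
    # Version 1.0: Initial version
    # Version 1.1: Added old_vol_attachment_ids
    VERSION = '1.1'

    fields = {
        'old_vol_attachment_ids': fields.DictOfStringsField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(ExampleMigrateData, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1):
            # A 1.0 peer does not know about the field; drop it rather
            # than send a primitive it cannot deserialize.
            primitive.pop('old_vol_attachment_ids', None)


obj = ExampleMigrateData(old_vol_attachment_ids={'vol-id': 'attach-id'})
primitive = obj.obj_to_primitive(target_version='1.0')
assert 'old_vol_attachment_ids' not in primitive['versioned_object.data']

Dropping the field keeps mixed-version compute hosts working during a rolling upgrade: an older destination simply never sees old_vol_attachment_ids.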

View File

@ -2886,6 +2886,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_volume_api.terminate_connection.assert_called_once_with(
self.context, uuids.volume_id, connector)
def test_remove_volume_connection_cinder_v3_api(self):
instance = fake_instance.fake_instance_obj(self.context,
uuid=uuids.instance)
volume_id = uuids.volume
vol_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': volume_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid,
'connection_info': '{"test": "test"}'})
vol_bdm.attachment_id = uuids.attachment
@mock.patch.object(self.compute.volume_api, 'terminate_connection')
@mock.patch.object(self.compute, 'driver')
@mock.patch.object(driver_bdm_volume, 'driver_detach')
@mock.patch.object(objects.BlockDeviceMapping,
'get_by_volume_and_instance')
def _test(mock_get_bdms, mock_detach, mock_driver, mock_terminate):
mock_get_bdms.return_value = vol_bdm
self.compute.remove_volume_connection(self.context,
volume_id, instance)
mock_detach.assert_called_once_with(self.context, instance,
self.compute.volume_api,
mock_driver)
mock_terminate.assert_not_called()
_test()
def test_delete_disk_metadata(self):
bdm = objects.BlockDeviceMapping(volume_id=uuids.volume_id, tag='foo')
instance = fake_instance.fake_instance_obj(self.context)
@ -6266,10 +6295,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch.object(compute.driver, 'pre_live_migration')
@mock.patch.object(compute, '_get_instance_block_device_info')
@mock.patch.object(compute_utils, 'is_volume_backed_instance')
def _test(mock_ivbi, mock_gibdi, mock_plm, mock_nwapi, mock_notify):
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def _test(mock_get_bdms, mock_ivbi, mock_gibdi, mock_plm, mock_nwapi,
mock_notify):
mock_get_bdms.return_value = []
instance = fake_instance.fake_instance_obj(self.context,
uuid=uuids.instance)
migrate_data = migrate_data_obj.LiveMigrateData()
mock_plm.return_value = migrate_data
r = compute.pre_live_migration(self.context, {'uuid': 'foo'},
r = compute.pre_live_migration(self.context, instance,
False, {}, {})
self.assertIsInstance(r, dict)
self.assertIsInstance(mock_plm.call_args_list[0][0][5],
@ -6295,6 +6330,136 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
_test()
def test_pre_live_migration_cinder_v3_api(self):
# This tests that pre_live_migration with a bdm that has an
# attachment_id will create a new attachment and update the
# attachment_id in the bdm.
compute = manager.ComputeManager()
instance = fake_instance.fake_instance_obj(self.context,
uuid=uuids.instance)
volume_id = uuids.volume
vol_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': volume_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid,
'connection_info': '{"test": "test"}'})
# attachment_create should not be called for this bdm
image_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'image', 'destination_type': 'local',
'volume_id': volume_id, 'device_name': '/dev/vda',
'instance_uuid': instance.uuid,
'connection_info': '{"test": "test"}'})
orig_attachment_id = uuids.attachment1
vol_bdm.attachment_id = orig_attachment_id
new_attachment_id = uuids.attachment2
image_bdm.attachment_id = uuids.attachment3
migrate_data = migrate_data_obj.LiveMigrateData()
migrate_data.old_vol_attachment_ids = {}
@mock.patch.object(compute.volume_api, 'attachment_complete')
@mock.patch.object(vol_bdm, 'save')
@mock.patch.object(compute, '_notify_about_instance_usage')
@mock.patch.object(compute, 'network_api')
@mock.patch.object(compute.driver, 'pre_live_migration')
@mock.patch.object(compute, '_get_instance_block_device_info')
@mock.patch.object(compute_utils, 'is_volume_backed_instance')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
@mock.patch.object(compute.volume_api, 'attachment_create')
def _test(mock_attach, mock_get_bdms, mock_ivbi,
mock_gibdi, mock_plm, mock_nwapi, mock_notify,
mock_bdm_save, mock_attach_complete):
mock_get_bdms.return_value = [vol_bdm, image_bdm]
mock_attach.return_value = {'id': new_attachment_id}
mock_plm.return_value = migrate_data
connector = compute.driver.get_volume_connector(instance)
r = compute.pre_live_migration(self.context, instance,
False, {}, migrate_data)
self.assertIsInstance(r, migrate_data_obj.LiveMigrateData)
self.assertIsInstance(mock_plm.call_args_list[0][0][5],
migrate_data_obj.LiveMigrateData)
mock_attach.assert_called_once_with(
self.context, volume_id, instance.uuid, connector=connector)
self.assertEqual(vol_bdm.attachment_id, new_attachment_id)
self.assertEqual(migrate_data.old_vol_attachment_ids[volume_id],
orig_attachment_id)
mock_bdm_save.assert_called_once_with()
mock_attach_complete.assert_called_once_with(self.context,
new_attachment_id)
_test()
def test_pre_live_migration_exception_cinder_v3_api(self):
# The instance in this test has two volume attachments. The second
# attachment_create call raises an exception. This verifies that the
# first volume's original attachment_id is restored after the failure.
compute = manager.ComputeManager()
instance = fake_instance.fake_instance_obj(self.context,
uuid=uuids.instance)
volume1_id = uuids.volume1
vol1_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': volume1_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid,
'connection_info': '{"test": "test"}'})
vol1_orig_attachment_id = uuids.attachment1
vol1_bdm.attachment_id = vol1_orig_attachment_id
volume2_id = uuids.volume2
vol2_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': volume2_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid,
'connection_info': '{"test": "test"}'})
vol2_orig_attachment_id = uuids.attachment2
vol2_bdm.attachment_id = vol2_orig_attachment_id
migrate_data = migrate_data_obj.LiveMigrateData()
migrate_data.old_vol_attachment_ids = {}
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(vol1_bdm, 'save')
@mock.patch.object(compute, '_notify_about_instance_usage')
@mock.patch.object(compute, 'network_api')
@mock.patch.object(compute.driver, 'pre_live_migration')
@mock.patch.object(compute, '_get_instance_block_device_info')
@mock.patch.object(compute_utils, 'is_volume_backed_instance')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
@mock.patch.object(compute.volume_api, 'attachment_delete')
@mock.patch.object(compute.volume_api, 'attachment_create')
def _test(mock_attach_create, mock_attach_delete, mock_get_bdms,
mock_ivbi, mock_gibdi, mock_plm, mock_nwapi, mock_notify,
mock_bdm_save, mock_exception):
new_attachment_id = uuids.attachment3
mock_attach_create.side_effect = [{'id': new_attachment_id},
test.TestingException]
mock_get_bdms.return_value = [vol1_bdm, vol2_bdm]
mock_plm.return_value = migrate_data
self.assertRaises(test.TestingException,
compute.pre_live_migration,
self.context, instance, False, {}, migrate_data)
self.assertEqual(vol1_orig_attachment_id, vol1_bdm.attachment_id)
self.assertEqual(vol2_orig_attachment_id, vol2_bdm.attachment_id)
self.assertEqual(mock_attach_create.call_count, 2)
mock_attach_delete.assert_called_once_with(self.context,
new_attachment_id)
_test()
@mock.patch.object(objects.ComputeNode,
'get_first_node_by_host_for_old_compat')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@ -6487,6 +6652,116 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
_do_test()
def test_post_live_migration_cinder_v3_api(self):
# Because live migration has succeeded, _post_live_migration
# should call attachment_delete with the original/old attachment_id
compute = manager.ComputeManager()
dest_host = 'test_dest_host'
instance = fake_instance.fake_instance_obj(self.context,
uuid=uuids.instance)
bdm_id = 1
volume_id = uuids.volume
vol_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': volume_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid,
'id': bdm_id,
'connection_info':
'{"connector": {"host": "%s"}}' % dest_host})
image_bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'image', 'destination_type': 'local',
'volume_id': volume_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid})
vol_bdm.attachment_id = uuids.attachment1
orig_attachment_id = uuids.attachment2
migrate_data = migrate_data_obj.LiveMigrateData()
migrate_data.old_vol_attachment_ids = {volume_id: orig_attachment_id}
image_bdm.attachment_id = uuids.attachment3
@mock.patch.object(compute, '_get_resource_tracker')
@mock.patch.object(vol_bdm, 'save')
@mock.patch.object(compute, 'update_available_resource')
@mock.patch.object(compute.volume_api, 'attachment_delete')
@mock.patch.object(compute, '_get_instance_block_device_info')
@mock.patch.object(compute, 'compute_rpcapi')
@mock.patch.object(compute, 'driver')
@mock.patch.object(compute, '_notify_about_instance_usage')
@mock.patch.object(compute, 'network_api')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def _test(mock_get_bdms, mock_net_api, mock_notify, mock_driver,
mock_rpc, mock_get_bdm_info, mock_attach_delete,
mock_update_resource, mock_bdm_save, mock_rt):
mock_rt.return_value = mock.Mock()
mock_get_bdms.return_value = [vol_bdm, image_bdm]
compute._post_live_migration(self.context, instance, dest_host,
migrate_data=migrate_data)
mock_attach_delete.assert_called_once_with(
self.context, orig_attachment_id)
_test()
@mock.patch.object(objects.ComputeNode,
'get_first_node_by_host_for_old_compat')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_from_instance_allocation')
def test_rollback_live_migration_cinder_v3_api(self, mock_remove_allocs,
mock_get_node):
compute = manager.ComputeManager()
dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
mock_get_node.return_value = dest_node
instance = fake_instance.fake_instance_obj(self.context,
uuid=uuids.instance)
volume_id = uuids.volume
orig_attachment_id = uuids.attachment1
new_attachment_id = uuids.attachment2
migrate_data = migrate_data_obj.LiveMigrateData()
migrate_data.old_vol_attachment_ids = {
volume_id: orig_attachment_id}
bdm = fake_block_device.fake_bdm_object(
self.context,
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': volume_id, 'device_name': '/dev/vdb',
'instance_uuid': instance.uuid})
bdm.attachment_id = new_attachment_id
@mock.patch.object(compute.volume_api, 'attachment_delete')
@mock.patch.object(bdm, 'save')
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(instance, 'save')
@mock.patch.object(compute, '_notify_about_instance_usage')
@mock.patch.object(compute.compute_rpcapi, 'remove_volume_connection')
@mock.patch.object(compute, 'network_api')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def _test(mock_get_bdms, mock_net_api, mock_remove_conn,
mock_usage, mock_instance_save, mock_action, mock_save,
mock_attach_delete):
# this tests that _rollback_live_migration replaces the bdm's
# attachment_id with the original attachment id that is in
# migrate_data.
mock_get_bdms.return_value = objects.BlockDeviceMappingList(
objects=[bdm])
compute._rollback_live_migration(self.context, instance, None,
migrate_data)
mock_remove_conn.assert_called_once_with(self.context, instance,
bdm.volume_id, None)
mock_attach_delete.assert_called_once_with(self.context,
new_attachment_id)
self.assertEqual(bdm.attachment_id, orig_attachment_id)
mock_save.assert_called_once_with()
_test()
def _get_migration(self, migration_id, status, migration_type):
migration = objects.Migration()
migration.id = migration_id

View File

@ -15,6 +15,7 @@
from nova import objects
from nova.objects import migrate_data
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel as uuids
class _TestLiveMigrateData(object):
@ -222,6 +223,14 @@ class _TestLibvirtLiveMigrateData(object):
obj2.to_legacy_dict())
self.assertEqual(obj.bdms[0].serial, obj2.bdms[0].serial)
def test_obj_make_compatible(self):
obj = migrate_data.LibvirtLiveMigrateData(
old_vol_attachment_ids={uuids.volume: uuids.attachment})
primitive = obj.obj_to_primitive(target_version='1.0')
self.assertNotIn('old_vol_attachment_ids', primitive)
primitive = obj.obj_to_primitive(target_version='1.3')
self.assertNotIn('old_vol_attachment_ids', primitive)
class TestLibvirtLiveMigrateData(test_objects._LocalTest,
_TestLibvirtLiveMigrateData):
@ -328,11 +337,13 @@ class _TestXenapiLiveMigrateData(object):
destination_sr_ref='foo',
migrate_send_data={'key': 'val'},
sr_uuid_map={'apple': 'banana'},
vif_uuid_map={'orange': 'lemon'})
vif_uuid_map={'orange': 'lemon'},
old_vol_attachment_ids={uuids.volume: uuids.attachment})
primitive = obj.obj_to_primitive('1.0')
self.assertNotIn('vif_uuid_map', primitive['nova_object.data'])
primitive2 = obj.obj_to_primitive('1.1')
self.assertIn('vif_uuid_map', primitive2['nova_object.data'])
self.assertNotIn('old_vol_attachment_ids', primitive2)
class TestXenapiLiveMigrateData(test_objects._LocalTest,
@ -348,9 +359,12 @@ class TestRemoteXenapiLiveMigrateData(test_objects._RemoteTest,
class _TestHyperVLiveMigrateData(object):
def test_obj_make_compatible(self):
obj = migrate_data.HyperVLiveMigrateData(
is_shared_instance_path=True)
is_shared_instance_path=True,
old_vol_attachment_ids={'yes': 'no'})
primitive = obj.obj_to_primitive(target_version='1.0')
self.assertNotIn('is_shared_instance_path', primitive)
primitive = obj.obj_to_primitive(target_version='1.1')
self.assertNotIn('old_vol_attachment_ids', primitive)
def test_to_legacy_dict(self):
obj = migrate_data.HyperVLiveMigrateData(
@ -391,7 +405,8 @@ class _TestPowerVMLiveMigrateData(object):
public_key='a_key',
dest_proc_compat='POWER7',
vol_data=dict(three=4),
vea_vlan_mappings=dict(five=6))
vea_vlan_mappings=dict(five=6),
old_vol_attachment_ids=dict(seven=8))
@staticmethod
def _mk_leg():
@ -404,6 +419,7 @@ class _TestPowerVMLiveMigrateData(object):
'dest_proc_compat': 'POWER7',
'vol_data': {'three': '4'},
'vea_vlan_mappings': {'five': '6'},
'old_vol_attachment_ids': {'seven': '8'},
}
def test_migrate_data(self):
@ -416,6 +432,8 @@ class _TestPowerVMLiveMigrateData(object):
obj = self._mk_obj()
primitive = obj.obj_to_primitive(target_version='1.0')
self.assertNotIn('vea_vlan_mappings', primitive)
primitive = obj.obj_to_primitive(target_version='1.1')
self.assertNotIn('old_vol_attachment_ids', primitive)
def test_to_legacy_dict(self):
self.assertEqual(self._mk_leg(), self._mk_obj().to_legacy_dict())

View File

@ -1093,7 +1093,7 @@ object_data = {
'FloatingIPList': '1.12-e4debd21fddb12cf40d36f737225fa9d',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HostMappingList': '1.0-267d952a5d48361d6d7604f50775bc34',
'HyperVLiveMigrateData': '1.1-9987a3cec31a81abac6fba7cc722e43f',
'HyperVLiveMigrateData': '1.2-bcb6dad687369348ffe0f41da6888704',
'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
@ -1118,7 +1118,7 @@ object_data = {
'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2',
'LibvirtLiveMigrateBDMInfo': '1.0-252aabb723ca79d5469fa56f64b57811',
'LibvirtLiveMigrateData': '1.3-2795e5646ee21e8c7f1c3e64fb6c80a3',
'LibvirtLiveMigrateData': '1.4-ae5f344e7f78d3b45c259a0f80ea69f5',
'KeyPair': '1.4-1244e8d1b103cc69d038ed78ab3a8cc6',
'KeyPairList': '1.3-94aad3ac5c938eef4b5e83da0212f506',
'MemoryDiagnostics': '1.0-2c995ae0f2223bb0f8e523c5cc0b83da',
@ -1142,7 +1142,7 @@ object_data = {
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'PowerVMLiveMigrateData': '1.1-ac0fdd26da685f12d7038782cabd393a',
'PowerVMLiveMigrateData': '1.2-b62cd242c5205a853545b1085b072340',
'Quotas': '1.3-40fcefe522111dddd3e5e6155702cf4e',
'QuotasNoOp': '1.3-347a039fc7cfee7b225b68b5181e0733',
'RequestSpec': '1.8-35033ecef47a880f9a5e46e2269e2b97',
@ -1168,7 +1168,7 @@ object_data = {
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenDeviceBus': '1.0-272a4f899b24e31e42b2b9a7ed7e9194',
'XenapiLiveMigrateData': '1.1-79e69f5ac9abfbcfcbaec18e8280bec6',
'XenapiLiveMigrateData': '1.2-72b9b6e70de34a283689ec7126aa4879',
}
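Each entry above pairs an object name with '<version>-<fingerprint>', where the fingerprint is derived from the object's fields and remotable methods; adding old_vol_attachment_ids is what changed the four hashes in this hunk. A hedged sketch of how such a registry can be checked, assuming oslo.versionedobjects' ObjectVersionChecker fixture (the helper Nova's test_objects builds on); check_fingerprints and its error handling are illustrative, not Nova's test code.

# Sketch: compare the registered objects' current fingerprints against
# an expected registry like object_data above.
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fixture as ovo_fixture


def check_fingerprints(expected):
    checker = ovo_fixture.ObjectVersionChecker(
        ovo_base.VersionedObjectRegistry.obj_classes())
    actual = checker.get_hashes()
    stale = {name: (expected.get(name), actual[name])
             for name in actual if expected.get(name) != actual[name]}
    if stale:
        raise AssertionError(
            'Objects changed without a version/hash update: %s'
            % sorted(stale))

A failure in such a check is the reminder to bump VERSION and refresh the expected hash, which is exactly what this hunk does for the four migrate-data objects.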

View File

@ -10389,12 +10389,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_post_live_migration(self):
vol = {'block_device_mapping': [
{'connection_info': {
{'attachment_id': None,
'connection_info': {
'data': {'multipath_id': 'dummy1'},
'serial': 'fake_serial1'},
'mount_device': '/dev/sda',
},
{'connection_info': {
{'attachment_id': None,
'connection_info': {
'data': {},
'serial': 'fake_serial2'},
'mount_device': '/dev/sdb', }]}
@ -10430,6 +10432,46 @@ class LibvirtConnTestCase(test.NoDBTestCase,
inst_ref),
mock.call({'data': {}}, 'sdb', inst_ref)])
def test_post_live_migration_cinder_v3(self):
cntx = context.get_admin_context()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = fake_instance.fake_instance_obj(cntx,
uuid=uuids.instance)
vol_id = uuids.volume
old_attachment_id = uuids.attachment
disk_dev = 'sda'
connection_info = {
'data': {'multipath_id': 'dummy1'},
'serial': vol_id}
block_device_mapping = [
{'attachment_id': uuids.attachment,
'mount_device': '/dev/%s' % disk_dev,
'connection_info': connection_info}]
old_attachment = {
'connection_info': {
'data': {'multipath_id': 'dummy1'},
'serial': vol_id}}
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_block_storage=True,
old_vol_attachment_ids={vol_id: old_attachment_id})
@mock.patch.object(drvr, '_disconnect_volume')
@mock.patch.object(drvr._volume_api, 'attachment_get')
@mock.patch.object(driver, 'block_device_info_get_mapping')
def _test(mock_get_bdms, mock_attachment_get, mock_disconnect):
mock_get_bdms.return_value = block_device_mapping
mock_attachment_get.return_value = old_attachment
drvr.post_live_migration(cntx, instance, None,
migrate_data=migrate_data)
mock_attachment_get.assert_called_once_with(cntx,
old_attachment_id)
mock_disconnect.assert_called_once_with(connection_info, disk_dev,
instance)
_test()
def test_get_instance_disk_info_excludes_volumes(self):
# Test data
instance = objects.Instance(**self.test_instance)

View File

@ -7066,15 +7066,24 @@ class LibvirtDriver(driver.ComputeDriver):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
connector = self.get_volume_connector(instance)
volume_api = self._volume_api
for vol in block_device_mapping:
# Retrieve connection info from Cinder's initialize_connection API.
# The info returned will be accurate for the source server.
volume_id = vol['connection_info']['serial']
connection_info = volume_api.initialize_connection(context,
volume_id,
connector)
if vol['attachment_id'] is None:
# Cinder v2 api flow: Retrieve connection info from Cinder's
# initialize_connection API. The info returned will be
# accurate for the source server.
connector = self.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(
context, volume_id, connector)
else:
# cinder v3.44 api flow: Retrieve the connection_info for
# the old attachment from cinder.
old_attachment_id = \
migrate_data.old_vol_attachment_ids[volume_id]
old_attachment = volume_api.attachment_get(
context, old_attachment_id)
connection_info = old_attachment['connection_info']
# TODO(leeantho) The following multipath_id logic is temporary
# and will be removed in the future once os-brick is updated
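The driver hunk above selects, per volume, where the source host's connection_info comes from: the Cinder v2 flow re-runs initialize_connection against the source connector, while the v3.44 flow reads the info back from the old attachment recorded in migrate_data. A minimal standalone restatement of that selection follows; volume_api, the bdm dicts, and old_vol_attachment_ids are stand-ins for Nova's objects, not real APIs.

# Sketch of the per-volume connection_info selection above.
def source_connection_info(volume_api, context, connector, vol,
                           old_vol_attachment_ids):
    volume_id = vol['connection_info']['serial']
    if vol['attachment_id'] is None:
        # Cinder v2 flow: ask Cinder for fresh connection info for the
        # source host's connector.
        return volume_api.initialize_connection(context, volume_id,
                                                connector)
    # Cinder v3.44 flow: the old (source) attachment already holds the
    # connection info that was used on the source host.
    old_attachment_id = old_vol_attachment_ids[volume_id]
    old_attachment = volume_api.attachment_get(context, old_attachment_id)
    return old_attachment['connection_info']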