Avoid redundant initialize_connection on source post live migration

During live migration we update bdm.connection_info for attached volumes
in pre_live_migration to reflect the new connection on the destination
node. This means that after the migration completes, the BDM no longer
holds a reference to the original connection_info needed to detach the
volume on the source host. To address this, change I3dfb75eb added a
second call to initialize_connection on the source host, re-fetching the
source host's connection_info before calling disconnect.

Unfortunately, the cinder driver interface does not strictly require
that multiple calls to initialize_connection return consistent results.
Although they normally do in practice, there is at least one cinder
driver (delliscsi) for which they don't, which results in a failure to
disconnect on the source host post migration.
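
To illustrate the failure mode, here is a toy sketch (not nova or cinder
code; ToyDriver and its target format are invented for illustration) of a
driver whose initialize_connection is not idempotent:

    import itertools

    class ToyDriver(object):
        def __init__(self):
            self._counter = itertools.count()

        def initialize_connection(self, volume_id, connector):
            # A different target is handed out on every call, which the
            # cinder driver interface does not forbid.
            return {'target': '%s-%d' % (volume_id, next(self._counter))}

    toy = ToyDriver()
    original = toy.initialize_connection('vol1', {'host': 'src'})
    refetched = toy.initialize_connection('vol1', {'host': 'src'})
    # The re-fetched info no longer matches the connection the source
    # host actually made, so disconnecting with it cannot tear that
    # connection down.
    assert original != refetched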

This change avoids the issue entirely by fetching the BDMs before they
are modified on the destination node. As well as working around this
specific issue, it also avoids a redundant cinder call in all cases.
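
The shape of the fix, in outline (a simplified sketch with assumed
names, not the full nova flow; the real code is in the
nova/compute/manager.py diff below):

    import functools

    def do_live_migration(context, instance, dest, driver, get_bdms,
                          pre_live_migration, post_live_migration):
        # Fetch the BDMs *before* pre_live_migration mutates
        # connection_info and attachment_id to point at the destination.
        source_bdms = get_bdms(context, instance)
        pre_live_migration(context, instance, dest)
        # Bind the unmodified source BDMs into the completion callback so
        # the source host can detach using the original connection_info.
        post = functools.partial(post_live_migration,
                                 source_bdms=source_bdms)
        driver.live_migration(context, instance, dest, post)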

Note that this massively simplifies post_live_migration in the libvirt
driver. The complexity removed was concerned with reconstructing the
original connection_info: it had to handle both the cinder v2 and v3
flows, and to reconstruct the multipath_id which the libvirt
fibrechannel volume connector writes into connection_info on connection.
None of this is necessary when we simply use the original data
unmodified.
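
The libvirt cleanup then reduces to the following (a minimal sketch; the
function and argument names are simplified stand-ins for the code in the
nova/virt/libvirt/driver.py diff below):

    def post_live_migration_cleanup(context, instance,
                                    block_device_mapping,
                                    disconnect_volume):
        for vol in block_device_mapping:
            # The mapping was built from BDMs fetched on the source host
            # before pre_live_migration updated them, so each
            # connection_info still describes the source attachment.
            disconnect_volume(context, vol['connection_info'], instance)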

The other affected drivers are Xenapi and HyperV. Xenapi doesn't touch
volumes in post_live_migration, so it is unaffected. HyperV did not
previously account for differences in connection_info between the source
and destination hosts, so it was most likely already broken; this change
should fix it.

Closes-Bug: #1754716
Closes-Bug: #1814245
Change-Id: I0390c9ff51f49b063f736ca6ef868a4fa782ede5
Matthew Booth 2018-03-09 14:41:49 +00:00 committed by Lee Yarwood
parent a6963fa685
commit b626c0dc7b
6 changed files with 97 additions and 142 deletions

nova/compute/manager.py

@@ -1800,7 +1800,7 @@ class ComputeManager(manager.Manager):
                                          bdms=None):
         """Transform block devices to the driver block_device format."""
-        if not bdms:
+        if bdms is None:
             bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                     context, instance.uuid)
         block_device_info = driver.get_block_device_info(instance, bdms)
@@ -6145,6 +6145,9 @@
                     # save current attachment so we can detach it on success,
                     # or restore it on a rollback.
+                    # NOTE(mdbooth): This data is no longer used by the source
+                    # host since change I0390c9ff. We can't remove it until we
+                    # are sure the source host has been upgraded.
                     migrate_data.old_vol_attachment_ids[bdm.volume_id] = \
                         bdm.attachment_id
@@ -6271,6 +6274,8 @@
         # done on source/destination. For now, this is just here for status
         # reporting
         self._set_migration_status(migration, 'preparing')
+        source_bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+                context, instance.uuid)
 
         class _BreakWaitForInstanceEvent(Exception):
             """Used as a signal to stop waiting for the network-vif-plugged
@@ -6285,7 +6290,7 @@
             if ('block_migration' in migrate_data and
                     migrate_data.block_migration):
                 block_device_info = self._get_instance_block_device_info(
-                    context, instance)
+                    context, instance, bdms=source_bdms)
                 disk = self.driver.get_instance_disk_info(
                     instance, block_device_info=block_device_info)
             else:
@@ -6362,10 +6367,19 @@
         self._set_migration_status(migration, 'running')
         if migrate_data:
             migrate_data.migration = migration
+
+        # NOTE(mdbooth): pre_live_migration will update connection_info and
+        # attachment_id on all volume BDMS to reflect the new destination
+        # host attachment. We fetch BDMs before that to retain connection_info
+        # and attachment_id relating to the source host for post migration
+        # cleanup.
+        post_live_migration = functools.partial(self._post_live_migration,
+                                                source_bdms=source_bdms)
+
         LOG.debug('live_migration data is %s', migrate_data)
         try:
             self.driver.live_migration(context, instance, dest,
-                                        self._post_live_migration,
+                                        post_live_migration,
                                         self._rollback_live_migration,
                                         block_migration, migrate_data)
         except Exception:
@@ -6543,8 +6557,9 @@
     @wrap_exception()
     @wrap_instance_fault
-    def _post_live_migration(self, ctxt, instance,
-                             dest, block_migration=False, migrate_data=None):
+    def _post_live_migration(self, ctxt, instance, dest,
+                             block_migration=False, migrate_data=None,
+                             source_bdms=None):
         """Post operations for live migration.
 
         This method is called from live_migration
@@ -6555,24 +6570,25 @@
         :param dest: destination host
         :param block_migration: if true, prepare for block migration
         :param migrate_data: if not None, it is a dict which has data
+        :param source_bdms: BDMs prior to modification by the destination
+                            compute host. Set by _do_live_migration and not
+                            part of the callback interface, so this is never
+                            None
         required for live migration without shared storage
 
         """
         LOG.info('_post_live_migration() is started..',
                  instance=instance)
 
-        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
-                ctxt, instance.uuid)
-
         # Cleanup source host post live-migration
         block_device_info = self._get_instance_block_device_info(
-                            ctxt, instance, bdms=bdms)
+                            ctxt, instance, bdms=source_bdms)
         self.driver.post_live_migration(ctxt, instance, block_device_info,
                                         migrate_data)
 
         # Detaching volumes.
         connector = self.driver.get_volume_connector(instance)
-        for bdm in bdms:
+        for bdm in source_bdms:
             if bdm.is_volume:
                 # Detaching volumes is a call to an external API that can fail.
                 # If it does, we need to handle it gracefully so that the call
@@ -6597,10 +6613,9 @@
                     else:
                         # cinder v3.44 api flow - delete the old attachment
                         # for the source host
-                        old_attachment_id = \
-                            migrate_data.old_vol_attachment_ids[bdm.volume_id]
                         self.volume_api.attachment_delete(ctxt,
-                                                          old_attachment_id)
+                                                          bdm.attachment_id)
                 except Exception as e:
                     if bdm.attachment_id is None:
                         LOG.error('Connection for volume %s not terminated on '
@@ -6610,7 +6625,7 @@
                     else:
                         LOG.error('Volume attachment %s not deleted on source '
                                   'host %s during post_live_migration: %s',
-                                  old_attachment_id, self.host,
+                                  bdm.attachment_id, self.host,
                                   six.text_type(e), instance=instance)
 
         # Releasing vlan.

nova/objects/migrate_data.py

@@ -109,6 +109,8 @@ class LiveMigrateData(obj_base.NovaObject):
         # old_vol_attachment_ids is a dict used to store the old attachment_ids
         # for each volume so they can be restored on a migration rollback. The
         # key is the volume_id, and the value is the attachment_id.
+        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
+        # should eventually remove it.
         'old_vol_attachment_ids': fields.DictOfStringsField(),
         # wait_for_vif_plugged is set in pre_live_migration on the destination
         # compute host based on the [compute]/live_migration_wait_for_vif_plug

nova/tests/unit/compute/test_compute.py

@@ -6426,6 +6426,7 @@ class ComputeTestCase(BaseTestCase,
             is_shared_block_storage=False,
             migration=migration_obj,
             block_migration=False)
+        bdms = objects.BlockDeviceMappingList(objects=[])
 
         with test.nested(
             mock.patch.object(
@@ -6437,7 +6438,8 @@
             mock_migrate, mock_setup, mock_mig_save
         ):
             self.compute._post_live_migration(c, instance, dest,
-                                              migrate_data=migrate_data)
+                                              migrate_data=migrate_data,
+                                              source_bdms=bdms)
 
             self.assertIn('cleanup', result)
             self.assertTrue(result['cleanup'])
@@ -6469,6 +6471,7 @@
             status='completed')
         migrate_data = migrate_data_obj.LiveMigrateData(
             migration=migration_obj)
+        bdms = objects.BlockDeviceMappingList(objects=[])
 
         # creating mocks
         with test.nested(
@@ -6493,7 +6496,8 @@
             clear_events, update_available_resource, mig_save
         ):
             self.compute._post_live_migration(c, instance, dest,
-                                              migrate_data=migrate_data)
+                                              migrate_data=migrate_data,
+                                              source_bdms=bdms)
             mock_notify.assert_has_calls([
                 mock.call(c, instance, 'fake-mini',
                           action='live_migration_post', phase='start'),
@@ -6549,6 +6553,7 @@
             status='completed')
         migrate_data = migrate_data_obj.LiveMigrateData(
             migration=migration_obj)
+        bdms = objects.BlockDeviceMappingList(objects=[])
 
         # creating mocks
         with test.nested(
@@ -6574,7 +6579,8 @@
             clear_events, update_available_resource, mig_save
         ):
             self.compute._post_live_migration(c, instance, dest,
-                                              migrate_data=migrate_data)
+                                              migrate_data=migrate_data,
+                                              source_bdms=bdms)
 
             update_available_resource.assert_has_calls([mock.call(c)])
             self.assertEqual('completed', migration_obj.status)
             # assert we did not log a success message
@@ -6613,8 +6619,6 @@
                               'clear_events_for_instance'),
             mock.patch.object(self.compute,
                               '_get_instance_block_device_info'),
-            mock.patch.object(objects.BlockDeviceMappingList,
-                              'get_by_instance_uuid'),
             mock.patch.object(self.compute.driver, 'get_volume_connector'),
             mock.patch.object(cinder.API, 'terminate_connection'),
             mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -6622,14 +6626,14 @@
         ) as (
             migrate_instance_start, post_live_migration_at_destination,
             setup_networks_on_host, clear_events_for_instance,
-            get_instance_volume_block_device_info, get_by_instance_uuid,
-            get_volume_connector, terminate_connection, delete_alloc,
+            get_instance_volume_block_device_info, get_volume_connector,
+            terminate_connection, delete_alloc,
         ):
-            get_by_instance_uuid.return_value = bdms
             get_volume_connector.return_value = 'fake-connector'
 
             self.compute._post_live_migration(c, instance, 'dest_host',
-                                              migrate_data=mock.MagicMock())
+                                              migrate_data=mock.MagicMock(),
+                                              source_bdms=bdms)
 
             terminate_connection.assert_called_once_with(
                 c, uuids.volume_id, 'fake-connector')

nova/tests/unit/compute/test_compute_mgr.py

@@ -7429,14 +7429,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
     @mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
     @mock.patch('nova.compute.manager.ComputeManager._post_live_migration')
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     def test_live_migration_wait_vif_plugged(
-            self, mock_post_live_mig, mock_pre_live_mig):
+            self, mock_get_bdms, mock_post_live_mig, mock_pre_live_mig):
         """Tests the happy path of waiting for network-vif-plugged events from
         neutron when pre_live_migration returns a migrate_data object with
         wait_for_vif_plugged=True.
         """
         migrate_data = objects.LibvirtLiveMigrateData(
             wait_for_vif_plugged=True)
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
         mock_pre_live_mig.return_value = migrate_data
         self.instance.info_cache = objects.InstanceInfoCache(
             network_info=network_model.NetworkInfo([
@@ -7460,14 +7462,17 @@
     @mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
     @mock.patch('nova.compute.manager.ComputeManager._post_live_migration')
     @mock.patch('nova.compute.manager.LOG.debug')
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     def test_live_migration_wait_vif_plugged_old_dest_host(
-            self, mock_log_debug, mock_post_live_mig, mock_pre_live_mig):
+            self, mock_get_bdms, mock_log_debug, mock_post_live_mig,
+            mock_pre_live_mig):
         """Tests the scenario that the destination compute returns a
         migrate_data with no wait_for_vif_plugged set because the dest compute
         doesn't have that code yet. In this case, we default to legacy behavior
         of not waiting.
         """
         migrate_data = objects.LibvirtLiveMigrateData()
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
         mock_pre_live_mig.return_value = migrate_data
         self.instance.info_cache = objects.InstanceInfoCache(
             network_info=network_model.NetworkInfo([
@@ -7488,13 +7493,15 @@
     @mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
     @mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     def test_live_migration_wait_vif_plugged_vif_plug_error(
-            self, mock_rollback_live_mig, mock_pre_live_mig):
+            self, mock_get_bdms, mock_rollback_live_mig, mock_pre_live_mig):
         """Tests the scenario where wait_for_instance_event fails with
         VirtualInterfacePlugException.
         """
         migrate_data = objects.LibvirtLiveMigrateData(
             wait_for_vif_plugged=True)
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
         mock_pre_live_mig.return_value = migrate_data
         self.instance.info_cache = objects.InstanceInfoCache(
             network_info=network_model.NetworkInfo([
@@ -7517,14 +7524,16 @@
     @mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
     @mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     def test_live_migration_wait_vif_plugged_timeout_error(
-            self, mock_rollback_live_mig, mock_pre_live_mig):
+            self, mock_get_bdms, mock_rollback_live_mig, mock_pre_live_mig):
         """Tests the scenario where wait_for_instance_event raises an
         eventlet Timeout exception and we're configured such that vif plugging
         failures are fatal (which is the default).
         """
         migrate_data = objects.LibvirtLiveMigrateData(
             wait_for_vif_plugged=True)
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
         mock_pre_live_mig.return_value = migrate_data
         self.instance.info_cache = objects.InstanceInfoCache(
             network_info=network_model.NetworkInfo([
@@ -7549,14 +7558,16 @@
     @mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
     @mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
     @mock.patch('nova.compute.manager.ComputeManager._post_live_migration')
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     def test_live_migration_wait_vif_plugged_timeout_non_fatal(
-            self, mock_post_live_mig, mock_rollback_live_mig,
+            self, mock_get_bdms, mock_post_live_mig, mock_rollback_live_mig,
             mock_pre_live_mig):
         """Tests the scenario where wait_for_instance_event raises an
         eventlet Timeout exception and we're configured such that vif plugging
         failures are NOT fatal.
         """
         self.flags(vif_plugging_is_fatal=False)
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
         migrate_data = objects.LibvirtLiveMigrateData(
             wait_for_vif_plugged=True)
         mock_pre_live_mig.return_value = migrate_data
@@ -7590,6 +7601,7 @@
                 'fake', self.instance, True, migration, {})
         self.assertEqual('error', migration.status)
 
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
     @mock.patch(
         'nova.compute.manager.ComputeManager._notify_about_instance_usage')
     @mock.patch.object(compute_utils, 'notify_about_instance_action')
@@ -7598,7 +7610,9 @@
     def test_live_migration_aborted_before_running(self, mock_rpc,
                                                    mock_rollback,
                                                    mock_action_notify,
-                                                   mock_usage_notify):
+                                                   mock_usage_notify,
+                                                   mock_get_bdms):
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
         migrate_data = objects.LibvirtLiveMigrateData(
             wait_for_vif_plugged=True)
         mock_rpc.return_value = migrate_data
@@ -7850,12 +7864,13 @@
         @mock.patch.object(self.compute, 'compute_rpcapi')
         @mock.patch.object(self.compute, '_notify_about_instance_usage')
         @mock.patch.object(self.compute, 'network_api')
-        @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
-        def _do_call(bdm, nwapi, notify, rpc, update):
+        def _do_call(nwapi, notify, rpc, update):
+            bdms = objects.BlockDeviceMappingList(objects=[])
             return self.compute._post_live_migration(self.context,
                                                      self.instance,
                                                      'foo',
-                                                     *args, **kwargs)
+                                                     *args, source_bdms=bdms,
+                                                     **kwargs)
 
         result = _do_call()
         mock_clean.assert_called_once_with(self.context, self.instance.uuid)
         return result
@@ -7908,13 +7923,11 @@
             {'source_type': 'image', 'destination_type': 'local',
              'volume_id': volume_id, 'device_name': '/dev/vdb',
              'instance_uuid': instance.uuid})
-        vol_bdm.attachment_id = uuids.attachment1
-        orig_attachment_id = uuids.attachment2
+        vol_bdm.attachment_id = uuids.attachment
         migrate_data = migrate_data_obj.LiveMigrateData()
         migrate_data.migration = objects.Migration(uuid=uuids.migration,
                                                    dest_node=instance.node,
                                                    source_node='src')
-        migrate_data.old_vol_attachment_ids = {volume_id: orig_attachment_id}
         image_bdm.attachment_id = uuids.attachment3
 
         @mock.patch('nova.compute.utils.notify_about_instance_action')
@@ -7932,20 +7945,19 @@
         @mock.patch.object(compute, 'driver')
         @mock.patch.object(compute, '_notify_about_instance_usage')
         @mock.patch.object(compute, 'network_api')
-        @mock.patch.object(objects.BlockDeviceMappingList,
-                           'get_by_instance_uuid')
-        def _test(mock_get_bdms, mock_net_api, mock_notify, mock_driver,
+        def _test(mock_net_api, mock_notify, mock_driver,
                   mock_rpc, mock_get_bdm_info, mock_attach_delete,
                   mock_update_resource, mock_bdm_save, mock_ga,
                   mock_clean, mock_notify_action):
             self._mock_rt()
-            mock_get_bdms.return_value = [vol_bdm, image_bdm]
+            bdms = objects.BlockDeviceMappingList(objects=[vol_bdm, image_bdm])
 
             compute._post_live_migration(self.context, instance, dest_host,
-                                         migrate_data=migrate_data)
+                                         migrate_data=migrate_data,
+                                         source_bdms=bdms)
 
             mock_attach_delete.assert_called_once_with(
-                self.context, orig_attachment_id)
+                self.context, uuids.attachment)
             mock_clean.assert_called_once_with(self.context, instance.uuid)
             mock_notify_action.assert_has_calls([
                 mock.call(self.context, instance, 'fake-mini',
@@ -7969,6 +7981,7 @@
         migrate_data = objects.LibvirtLiveMigrateData()
         source_vif = network_model.VIF(uuids.port_id, type='ovs')
         migrate_data.vifs = [objects.VIFMigrateData(source_vif=source_vif)]
+        bdms = objects.BlockDeviceMappingList(objects=[])
 
         nw_info = network_model.NetworkInfo(
             [network_model.VIF(uuids.port_id, type='ovn')])
@@ -8013,7 +8026,7 @@
             self._mock_rt()
             self.compute._post_live_migration(
                 self.context, self.instance, 'fake-dest',
-                migrate_data=migrate_data)
+                migrate_data=migrate_data, source_bdms=bdms)
             post_live_migration_at_source.assert_called_once_with(
                 self.context, self.instance,
                 test.MatchType(network_model.NetworkInfo))

nova/tests/unit/virt/libvirt/test_driver.py

@@ -12369,88 +12369,37 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         mock_get_size.called_once_with('/test/disk')
 
     def test_post_live_migration(self):
-        vol = {'block_device_mapping': [
+        vol1_conn_info = {'data': {'test_data': mock.sentinel.vol1},
+                          'serial': 'fake_serial1'}
+        vol2_conn_info = {'data': {'test_data': mock.sentinel.vol2},
+                          'serial': 'fake_serial2'}
+
+        bdi = {'block_device_mapping': [
             {'attachment_id': None,
-             'connection_info': {
-                 'data': {'multipath_id': 'dummy1'},
-                 'serial': 'fake_serial1'},
+             'connection_info': vol1_conn_info,
              'mount_device': '/dev/sda',
             },
             {'attachment_id': None,
-             'connection_info': {
-                 'data': {},
-                 'serial': 'fake_serial2'},
+             'connection_info': vol2_conn_info,
             'mount_device': '/dev/sdb', }]}
-
-        def fake_initialize_connection(context, volume_id, connector):
-            return {'data': {}}
 
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-        fake_connector = {'host': 'fake'}
         inst_ref = {'id': 'foo'}
         cntx = context.get_admin_context()
-
-        # Set up the mock expectations
-        with test.nested(
-            mock.patch.object(driver, 'block_device_info_get_mapping',
-                              return_value=vol['block_device_mapping']),
-            mock.patch.object(drvr, "get_volume_connector",
-                              return_value=fake_connector),
-            mock.patch.object(drvr._volume_api, "initialize_connection",
-                              side_effect=fake_initialize_connection),
-            mock.patch.object(drvr, '_disconnect_volume')
-        ) as (block_device_info_get_mapping, get_volume_connector,
-              initialize_connection, _disconnect_volume):
-            drvr.post_live_migration(cntx, inst_ref, vol)
-
-            block_device_info_get_mapping.assert_has_calls([
-                mock.call(vol)])
-            get_volume_connector.assert_has_calls([
-                mock.call(inst_ref)])
-            _disconnect_volume.assert_has_calls([
-                mock.call(cntx, {'data': {'multipath_id': 'dummy1'}},
-                          inst_ref),
-                mock.call(cntx, {'data': {}}, inst_ref)])
-
-    def test_post_live_migration_cinder_v3(self):
-        cntx = context.get_admin_context()
-        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-        instance = fake_instance.fake_instance_obj(cntx,
-                                                   uuid=uuids.instance)
-
-        vol_id = uuids.volume
-        old_attachment_id = uuids.attachment
-        disk_dev = 'sda'
-        connection_info = {
-            'data': {'multipath_id': 'dummy1'},
-            'serial': vol_id}
-
-        block_device_mapping = [
-            {'attachment_id': uuids.attachment,
-             'mount_device': '/dev/%s' % disk_dev,
-             'connection_info': connection_info}]
-        old_attachment = {
-            'connection_info': {
-                'data': {'multipath_id': 'dummy1'},
-                'serial': vol_id}}
-
-        migrate_data = objects.LibvirtLiveMigrateData(
-            is_shared_block_storage=True,
-            old_vol_attachment_ids={vol_id: old_attachment_id})
-
+        @mock.patch.object(driver, 'block_device_info_get_mapping',
+                           return_value=bdi['block_device_mapping'])
         @mock.patch.object(drvr, '_disconnect_volume')
-        @mock.patch.object(drvr._volume_api, 'attachment_get')
-        @mock.patch.object(driver, 'block_device_info_get_mapping')
-        def _test(mock_get_bdms, mock_attachment_get, mock_disconnect):
-            mock_get_bdms.return_value = block_device_mapping
-            mock_attachment_get.return_value = old_attachment
-
-            drvr.post_live_migration(cntx, instance, None,
-                                     migrate_data=migrate_data)
+        def _test(_disconnect_volume, block_device_info_get_mapping):
+            drvr.post_live_migration(cntx, inst_ref, bdi)
+            block_device_info_get_mapping.assert_called_once_with(bdi)
+            _disconnect_volume.assert_has_calls([
+                mock.call(cntx, vol1_conn_info, inst_ref),
+                mock.call(cntx, vol2_conn_info, inst_ref)])
-            mock_attachment_get.assert_called_once_with(cntx,
-                                                        old_attachment_id)
-            mock_disconnect.assert_called_once_with(cntx, connection_info,
-                                                    instance)
         _test()
 
     @mock.patch('os.stat')

nova/virt/libvirt/driver.py

@@ -7962,40 +7962,12 @@ class LibvirtDriver(driver.ComputeDriver):
         # Disconnect from volume server
         block_device_mapping = driver.block_device_info_get_mapping(
                 block_device_info)
-        volume_api = self._volume_api
         for vol in block_device_mapping:
-            volume_id = vol['connection_info']['serial']
-            if vol['attachment_id'] is None:
-                # Cinder v2 api flow: Retrieve connection info from Cinder's
-                # initialize_connection API. The info returned will be
-                # accurate for the source server.
-                connector = self.get_volume_connector(instance)
-                connection_info = volume_api.initialize_connection(
-                    context, volume_id, connector)
-            else:
-                # cinder v3.44 api flow: Retrieve the connection_info for
-                # the old attachment from cinder.
-                old_attachment_id = \
-                    migrate_data.old_vol_attachment_ids[volume_id]
-                old_attachment = volume_api.attachment_get(
-                    context, old_attachment_id)
-                connection_info = old_attachment['connection_info']
-
-            # TODO(leeantho) The following multipath_id logic is temporary
-            # and will be removed in the future once os-brick is updated
-            # to handle multipath for drivers in a more efficient way.
-            # For now this logic is needed to ensure the connection info
-            # data is correct.
-
-            # Pull out multipath_id from the bdm information. The
-            # multipath_id can be placed into the connection info
-            # because it is based off of the volume and will be the
-            # same on the source and destination hosts.
-            if 'multipath_id' in vol['connection_info']['data']:
-                multipath_id = vol['connection_info']['data']['multipath_id']
-                connection_info['data']['multipath_id'] = multipath_id
-
-            self._disconnect_volume(context, connection_info, instance)
+            # NOTE(mdbooth): The block_device_info we were passed was
+            # initialized with BDMs from the source host before they were
+            # updated to point to the destination. We can safely use this to
+            # disconnect the source without re-fetching.
+            self._disconnect_volume(context, vol['connection_info'], instance)
 
     def post_live_migration_at_source(self, context, instance, network_info):
         """Unplug VIFs from networks at source.