Actually pass the migration data object down to the virt drivers

This makes the compute manager actually pass the objects down to the
virt drivers and makes the virt drivers use objects natively.

Related to blueprint objectify-live-migrate-data

Change-Id: I620d59b2f846e1c577f6714a84e32ac6df2d6fee
This commit is contained in:
Dan Smith 2015-11-20 10:22:24 -08:00
parent 5dd9b23e6b
commit 69e0175807
7 changed files with 218 additions and 164 deletions

View File

@ -5109,7 +5109,10 @@ class ComputeManager(manager.Manager):
migrate_data)
if isinstance(pre_live_migration_data,
migrate_data_obj.LiveMigrateData):
pre_live_migration_data = pre_live_migration_data.to_legacy_dict()
pre_live_migration_data = pre_live_migration_data.to_legacy_dict(
pre_migration_result=True)
pre_live_migration_data = pre_live_migration_data[
'pre_live_migration_result']
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
@ -5130,6 +5133,21 @@ class ComputeManager(manager.Manager):
return pre_live_migration_data
def _get_migrate_data_obj(self):
    """Return an empty LiveMigrateData subclass for the configured driver.

    Provides compatibility with older RPC callers that send legacy
    dict-formatted migrate_data: the caller populates the returned
    object via from_legacy_dict().

    :returns: an empty objects.LibvirtLiveMigrateData or
        objects.XenapiLiveMigrateData instance, depending on
        CONF.compute_driver.
    :raises: exception.MigrationError if the configured compute driver
        has no known live-migrate data object.
    """
    # FIXME(danms): A couple patches from now, we'll be able to
    # avoid this failure _if_ we get a new-style call with the
    # object.
    if CONF.compute_driver.startswith('libvirt'):
        return objects.LibvirtLiveMigrateData()
    elif CONF.compute_driver.startswith('xenapi'):
        return objects.XenapiLiveMigrateData()
    else:
        # Use _LE for error-level log translation, consistent with the
        # rest of this module (plain _ is reserved for exception text).
        LOG.error(_LE('Older RPC caller and unsupported virt driver in '
                      'use. Unable to handle this!'))
        raise exception.MigrationError(
            _('Unknown compute driver while providing compatibility '
              'with older RPC formats'))
def _do_live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
# NOTE(danms): We should enhance the RT to account for migrations
@ -5153,7 +5171,8 @@ class ComputeManager(manager.Manager):
context, instance,
block_migration, disk, dest, migrate_data)
migrate_data['pre_live_migration_result'] = pre_migration_data
migrate_data_obj = self._get_migrate_data_obj()
migrate_data_obj.from_legacy_dict(migrate_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Pre live migration failed at %s'),
@ -5164,12 +5183,12 @@ class ComputeManager(manager.Manager):
self._set_migration_status(migration, 'running')
migrate_data['migration'] = migration
migrate_data_obj.migration = migration
try:
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
block_migration, migrate_data_obj)
except Exception:
# Executing live migration
# live_migration might raises exceptions, but
@ -5226,11 +5245,9 @@ class ComputeManager(manager.Manager):
# block storage or instance path were shared
is_shared_block_storage = not block_migration
is_shared_instance_path = not block_migration
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', is_shared_block_storage)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', is_shared_instance_path)
if isinstance(migrate_data, objects.LibvirtLiveMigrateData):
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
@ -5347,9 +5364,9 @@ class ComputeManager(manager.Manager):
instance=instance)
self._clean_instance_console_tokens(ctxt, instance)
if migrate_data and migrate_data.get('migration'):
migrate_data['migration'].status = 'completed'
migrate_data['migration'].save()
if migrate_data and migrate_data.obj_attr_is_set('migration'):
migrate_data.migration.status = 'completed'
migrate_data.migration.save()
def _consoles_enabled(self):
"""Returns whether a console is enable."""
@ -5450,8 +5467,10 @@ class ComputeManager(manager.Manager):
# NOTE(danms): Pop out the migration object so we don't pass
# it over RPC unintentionally below
if migrate_data:
if isinstance(migrate_data, dict):
migration = migrate_data.pop('migration', None)
elif isinstance(migrate_data, migrate_data_obj.LiveMigrateData):
migration = migrate_data.migration
else:
migration = None
@ -5472,6 +5491,8 @@ class ComputeManager(manager.Manager):
block_migration, migrate_data)
if do_cleanup:
if isinstance(migrate_data, migrate_data_obj.LiveMigrateData):
migrate_data = migrate_data.to_legacy_dict()
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)

View File

@ -68,6 +68,7 @@ from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova import policy
from nova import quota
from nova.scheduler import client as scheduler_client
@ -5625,11 +5626,13 @@ class ComputeTestCase(BaseTestCase):
migration = objects.Migration()
ret = self.compute.live_migration(c, dest=dest,
instance=instance,
block_migration=False,
migration=migration,
migrate_data=migrate_data)
with mock.patch.object(self.compute, '_get_migrate_data_obj') as gmdo:
gmdo.return_value = migrate_data_obj.LiveMigrateData()
ret = self.compute.live_migration(c, dest=dest,
instance=instance,
block_migration=False,
migration=migration,
migrate_data=migrate_data)
self.assertIsNone(ret)
event_mock.assert_called_with(
@ -5688,7 +5691,10 @@ class ComputeTestCase(BaseTestCase):
# start test
self.mox.ReplayAll()
migrate_data = {'is_shared_instance_path': False}
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False,
is_shared_block_storage=False,
block_migration=False)
self.compute._post_live_migration(c, instance, dest,
migrate_data=migrate_data)
self.assertIn('cleanup', result)
@ -5711,7 +5717,9 @@ class ComputeTestCase(BaseTestCase):
'power_state': power_state.PAUSED})
instance.save()
migrate_data = {'migration': mock.MagicMock()}
migration_obj = objects.Migration()
migrate_data = migrate_data_obj.LiveMigrateData(
migration=migration_obj)
# creating mocks
with test.nested(
@ -5727,12 +5735,13 @@ class ComputeTestCase(BaseTestCase):
'setup_networks_on_host'),
mock.patch.object(self.compute.instance_events,
'clear_events_for_instance'),
mock.patch.object(self.compute, 'update_available_resource')
mock.patch.object(self.compute, 'update_available_resource'),
mock.patch.object(migration_obj, 'save'),
) as (
post_live_migration, unfilter_instance,
migrate_instance_start, post_live_migration_at_destination,
post_live_migration_at_source, setup_networks_on_host,
clear_events, update_available_resource
clear_events, update_available_resource, mig_save
):
self.compute._post_live_migration(c, instance, dest,
migrate_data=migrate_data)
@ -5753,8 +5762,8 @@ class ComputeTestCase(BaseTestCase):
[mock.call(c, instance, [])])
clear_events.assert_called_once_with(instance)
update_available_resource.assert_has_calls([mock.call(c)])
self.assertEqual('completed', migrate_data['migration'].status)
migrate_data['migration'].save.assert_called_once_with()
self.assertEqual('completed', migration_obj.status)
mig_save.assert_called_once_with()
def test_post_live_migration_terminate_volume_connections(self):
c = context.get_admin_context()

View File

@ -176,9 +176,10 @@ class _TestLibvirtLiveMigrateData(object):
serial_listen_addr='127.0.0.1',
bdms=[test_bdmi])
obj2 = migrate_data.LibvirtLiveMigrateData()
obj2.from_legacy_dict(obj.to_legacy_dict())
obj2.from_legacy_dict(obj.to_legacy_dict(pre_migration_result=True))
self.assertEqual(obj.to_legacy_dict(),
obj2.to_legacy_dict())
self.assertEqual(obj.bdms[0].serial, obj2.bdms[0].serial)
class TestLibvirtLiveMigrateData(test_objects._LocalTest,

View File

@ -6523,6 +6523,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='127.0.0.1',
bdms=[])
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
@ -6542,21 +6547,24 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'ip-1.2.3.4:3260-iqn.'
'cde.67890.opst-lun-Z')
# start test
migrate_data = {'pre_live_migration_result':
{'volume': {u'58a84f6d-3f0c-4e19-a0af-eb657b790657':
{'connection_info': {u'driver_volume_type': u'iscsi',
'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
'disk_info': {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'}}}},
'graphics_listen_addrs': {}}
pre_live_migrate_data = ((migrate_data or {}).
get('pre_live_migration_result', {}))
volume = pre_live_migrate_data.get('volume')
connection_info = {
u'driver_volume_type': u'iscsi',
u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {
u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
},
}
bdm = objects.LibvirtLiveMigrateBDMInfo(
serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
bus='virtio', type='disk', dev='vdb',
connection_info=connection_info)
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
bdms=[bdm])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
test_mock = mock.MagicMock()
@ -6574,7 +6582,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertFalse(drvr._live_migration_operation(
self.context, instance_ref, 'dest', False,
migrate_data, test_mock))
mupdate.assert_called_once_with(target_xml, volume, None, None)
mupdate.assert_called_once_with(target_xml, migrate_data.bdms,
{}, '')
def test_update_volume_xml(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -6589,40 +6598,35 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'cde.67890.opst-lun-Z')
target_xml = etree.tostring(etree.fromstring(target_xml))
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
volume_xml = {'volume': {}}
volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
volume_xml['volume'][serial]['connection_info'] = \
{u'driver_volume_type': u'iscsi',
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
bus='virtio',
type='disk',
dev='vdb')
bdmi.connection_info = {u'driver_volume_type': u'iscsi',
'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
'type': u'disk',
'dev': u'vdb'}
connection_info = volume_xml['volume'][serial]['connection_info']
disk_info = volume_xml['volume'][serial]['disk_info']
conf = vconfig.LibvirtConfigGuestDisk()
conf.source_device = disk_info['type']
conf.source_device = bdmi.type
conf.driver_name = "qemu"
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
conf.target_dev = bdmi.dev
conf.target_bus = bdmi.bus
conf.serial = bdmi.connection_info.get('serial')
conf.source_type = "block"
conf.source_path = connection_info['data'].get('device_path')
conf.source_path = bdmi.connection_info['data'].get('device_path')
with mock.patch.object(drvr, '_get_volume_config',
return_value=conf):
parser = etree.XMLParser(remove_blank_text=True)
xml_doc = etree.fromstring(initial_xml, parser)
config = drvr._update_volume_xml(xml_doc,
volume_xml['volume'])
config = drvr._update_volume_xml(xml_doc, [bdmi])
xml_doc = etree.fromstring(target_xml, parser)
self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
@ -6652,39 +6656,38 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'abc.12345.opst-lun-X')
target_xml = etree.tostring(etree.fromstring(target_xml))
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
volume_xml = {'volume': {}}
volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
volume_xml['volume'][serial]['connection_info'] = \
{u'driver_volume_type': u'iscsi',
'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
'type': u'disk',
'dev': u'vdb'}
connection_info = volume_xml['volume'][serial]['connection_info']
disk_info = volume_xml['volume'][serial]['disk_info']
connection_info = {
u'driver_volume_type': u'iscsi',
'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {
u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
},
}
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
bus='virtio',
dev='vdb',
type='disk')
bdmi.connection_info = connection_info
conf = vconfig.LibvirtConfigGuestDisk()
conf.source_device = disk_info['type']
conf.source_device = bdmi.type
conf.driver_name = "qemu"
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
conf.target_dev = bdmi.dev
conf.target_bus = bdmi.bus
conf.serial = bdmi.connection_info.get('serial')
conf.source_type = "block"
conf.source_path = connection_info['data'].get('device_path')
conf.source_path = bdmi.connection_info['data'].get('device_path')
with mock.patch.object(drvr, '_get_volume_config',
return_value=conf):
xml_doc = etree.fromstring(initial_xml)
config = drvr._update_volume_xml(xml_doc,
volume_xml['volume'])
config = drvr._update_volume_xml(xml_doc, [bdmi])
self.assertEqual(target_xml, etree.tostring(config))
def test_update_volume_xml_no_connection_info(self):
@ -6699,14 +6702,17 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'abc.12345.opst-lun-X')
target_xml = etree.tostring(etree.fromstring(target_xml))
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
volume_xml = {'volume': {}}
volume_xml['volume'][serial] = {'info1': {}, 'info2': {}}
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
dev='vdb',
type='disk',
bus='scsi',
format='qcow')
bdmi.connection_info = {}
conf = vconfig.LibvirtConfigGuestDisk()
with mock.patch.object(drvr, '_get_volume_config',
return_value=conf):
xml_doc = etree.fromstring(initial_xml)
config = drvr._update_volume_xml(xml_doc,
volume_xml['volume'])
config = drvr._update_volume_xml(xml_doc, [bdmi])
self.assertEqual(target_xml, etree.tostring(config))
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI2")
@ -6735,10 +6741,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
# start test
bandwidth = CONF.libvirt.live_migration_bandwidth
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '10.0.0.1', 'spice': '10.0.0.2'},
'serial_listen_addr': '9.0.0.12'}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='9.0.0.12',
bdms=[])
dom = fakelibvirt.virDomain
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
@ -6759,12 +6766,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
CONF.set_override("enabled", True, "serial_console")
dom = fakelibvirt.virDomain
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, None, dom)
False, migrate_data, dom)
def test_live_migration_fails_with_invalid_live_migration_flag(self):
self.flags(live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, "
@ -6802,9 +6811,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
fakelibvirt.libvirtError("ERR"))
# start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='0.0.0.0',
serial_listen_addr='127.0.0.1',
bdms=[])
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
@ -6831,7 +6842,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
fakelibvirt.libvirtError("ERR"))
# start test
migrate_data = {}
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
bdms=[])
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
@ -6855,9 +6868,10 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(vdmock, "migrateToURI")
# start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='1.2.3.4',
graphics_listen_addr_spice='1.2.3.4',
serial_listen_addr='127.0.0.1')
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
@ -6897,9 +6911,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
fakelibvirt.libvirtError('ERR'))
# start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='127.0.0.1',
graphics_listen_addr_spice='127.0.0.1',
serial_listen_addr='127.0.0.1',
bdms=[])
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
@ -6940,8 +6956,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
_bandwidth).AndRaise(test.TestingException('oops'))
graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs': graphics_listen_addrs}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='127.0.0.1',
serial_listen_addr='127.0.0.1',
bdms=[])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(
@ -6972,7 +6991,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_get_instance_path.return_value = fake_instance_path
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
migrate_data = {'is_shared_instance_path': False}
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False,
instance_relative_path=False)
self.assertRaises(exception.Invalid,
drvr.rollback_live_migration_at_destination,
"context", "instance", [], None, True, migrate_data)
@ -6990,7 +7011,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
migrate_data = {'is_shared_instance_path': True}
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=True,
instance_relative_path=False)
drvr.rollback_live_migration_at_destination("context", "instance", [],
None, True, migrate_data)
mock_destroy.assert_called_once_with("context", "instance", [],

View File

@ -3536,16 +3536,16 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'is_volume_backed': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
dest_check_data = objects.XenapiLiveMigrateData(
block_migration=True,
is_volume_backed=True,
destination_sr_ref=None,
migrate_send_data=None)
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result.to_legacy_dict())
self.assertEqual(dest_check_data.to_legacy_dict(),
result.to_legacy_dict())
def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
@ -3676,8 +3676,9 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref="foo",
migrate_send_data={"bar": "baz"})
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
@ -3702,8 +3703,9 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
block_migration, migrate_data):
post_method.called = True
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref="foo",
migrate_send_data={"bar": "baz"})
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
@ -3736,7 +3738,9 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
migrate_data = objects.XenapiLiveMigrateData(
destination_sr_ref='foo',
migrate_send_data={'bar': 'baz'})
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
@ -3749,7 +3753,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
class Session(xenapi_fake.SessionBase):
def VM_migrate_send(self_, session, vmref, migrate_data, islive,
vdi_map, vif_map, options):
self.assertEqual('SOMEDATA', migrate_data)
self.assertEqual({'SOMEDATA': 'SOMEVAL'}, migrate_data)
self.assertEqual(fake_vdi_map, vdi_map)
stubs.stubout_session(self.stubs, Session)
@ -3767,12 +3771,14 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
def dummy_callback(*args, **kwargs):
pass
migrate_data = objects.XenapiLiveMigrateData(
migrate_send_data={'SOMEDATA': 'SOMEVAL'},
destination_sr_ref='TARGET_SR_OPAQUE_REF')
conn.live_migration(
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration="SOMEDATA",
migrate_data=dict(migrate_send_data='SOMEDATA',
destination_sr_ref="TARGET_SR_OPAQUE_REF"))
migrate_data=migrate_data)
def test_live_migrate_pool_migration_xapi_call_parameters(self):
@ -3795,10 +3801,13 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
def dummy_callback(*args, **kwargs):
pass
migrate_data = objects.XenapiLiveMigrateData(
migrate_send_data={'foo': 'bar'},
destination_sr_ref='foo')
self.assertRaises(IOError, conn.live_migration,
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration=False, migrate_data={})
block_migration=False, migrate_data=migrate_data)
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)

View File

@ -5559,11 +5559,12 @@ class LibvirtDriver(driver.ComputeDriver):
post_method, recover_method, block_migration,
migrate_data)
def _update_xml(self, xml_str, volume, listen_addrs, serial_listen_addr):
def _update_xml(self, xml_str, migrate_bdm_info, listen_addrs,
serial_listen_addr):
xml_doc = etree.fromstring(xml_str)
if volume:
xml_doc = self._update_volume_xml(xml_doc, volume)
if migrate_bdm_info:
xml_doc = self._update_volume_xml(xml_doc, migrate_bdm_info)
if listen_addrs:
xml_doc = self._update_graphics_xml(xml_doc, listen_addrs)
else:
@ -5589,24 +5590,23 @@ class LibvirtDriver(driver.ComputeDriver):
return xml_doc
def _update_volume_xml(self, xml_doc, volume):
def _update_volume_xml(self, xml_doc, migrate_bdm_info):
"""Update XML using device information of destination host."""
# Update volume xml
parser = etree.XMLParser(remove_blank_text=True)
disk_nodes = xml_doc.findall('./devices/disk')
bdm_info_by_serial = {x.serial: x for x in migrate_bdm_info}
for pos, disk_dev in enumerate(disk_nodes):
serial_source = disk_dev.findtext('serial')
if serial_source is None or volume.get(serial_source) is None:
bdm_info = bdm_info_by_serial.get(serial_source)
if (serial_source is None or
not bdm_info or not bdm_info.connection_info or
serial_source not in bdm_info_by_serial):
continue
if ('connection_info' not in volume[serial_source] or
'disk_info' not in volume[serial_source]):
continue
conf = self._get_volume_config(
volume[serial_source]['connection_info'],
volume[serial_source]['disk_info'])
bdm_info.connection_info, bdm_info.as_disk_info())
xml_doc2 = etree.XML(conf.to_xml(), parser)
serial_dest = xml_doc2.findtext('serial')
@ -5663,9 +5663,9 @@ class LibvirtDriver(driver.ComputeDriver):
' the local address (127.0.0.1 or ::1).')
raise exception.MigrationError(reason=msg)
if listen_addrs is not None:
dest_local_vnc = listen_addrs['vnc'] in LOCAL_ADDRS
dest_local_spice = listen_addrs['spice'] in LOCAL_ADDRS
if listen_addrs:
dest_local_vnc = listen_addrs.get('vnc') in LOCAL_ADDRS
dest_local_spice = listen_addrs.get('spice') in LOCAL_ADDRS
if ((CONF.vnc.enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
@ -5730,18 +5730,20 @@ class LibvirtDriver(driver.ComputeDriver):
flagvals = [getflag(x.strip()) for x in flaglist]
logical_sum = six.moves.reduce(lambda x, y: x | y, flagvals)
pre_live_migrate_data = (migrate_data or {}).get(
'pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
volume = pre_live_migrate_data.get('volume')
serial_listen_addr = pre_live_migrate_data.get(
'serial_listen_addr')
listen_addrs = {}
if 'graphics_listen_addr_vnc' in migrate_data:
listen_addrs['vnc'] = str(
migrate_data.graphics_listen_addr_vnc)
if 'graphics_listen_addr_spice' in migrate_data:
listen_addrs['spice'] = str(
migrate_data.graphics_listen_addr_spice)
serial_listen_addr = migrate_data.serial_listen_addr
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None)
if (migratable_flag is None or
(listen_addrs is None and not volume)):
if (migratable_flag is None or (
not listen_addrs and not migrate_data.bdms)):
# TODO(alexs-h): These checks could be moved to the
# check_can_live_migrate_destination/source phase
self._check_graphics_addresses_can_live_migrate(listen_addrs)
@ -5753,7 +5755,7 @@ class LibvirtDriver(driver.ComputeDriver):
else:
old_xml_str = guest.get_xml_desc(dump_migratable=True)
new_xml_str = self._update_xml(old_xml_str,
volume,
migrate_data.bdms,
listen_addrs,
serial_listen_addr)
try:
@ -6270,18 +6272,11 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(gcb): Failed block live migration may leave instance
# directory at destination node, ensure it is always deleted.
is_shared_instance_path = True
instance_relative_path = None
if migrate_data:
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
instance_relative_path = migrate_data.get(
'instance_relative_path')
mdo = objects.LibvirtLiveMigrateData()
if instance_relative_path:
mdo.instance_relative_path = instance_relative_path
is_shared_instance_path = migrate_data.is_shared_instance_path
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, mdo)
instance, migrate_data)
if os.path.exists(instance_dir):
shutil.rmtree(instance_dir)

View File

@ -2198,13 +2198,9 @@ class VMOps(object):
if ('block_migration' in dest_check_data and
dest_check_data.block_migration):
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = {
'destination_sr_ref': dest_check_data.destination_sr_ref,
'migrate_send_data': dest_check_data.migrate_send_data,
}
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
"VM.assert_can_migrate", vm_ref, dest_check_data)
except self._session.XenAPI.Failure as exc:
reason = exc.details[0]
msg = _('assert_can_migrate failed because: %s') % reason
@ -2288,8 +2284,8 @@ class VMOps(object):
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
destination_sr_ref = migrate_data.destination_sr_ref
migrate_send_data = migrate_data.migrate_send_data
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
@ -2331,8 +2327,8 @@ class VMOps(object):
if migrate_data is not None:
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(
self._session, vm_ref)
migrate_data['kernel-file'] = kernel
migrate_data['ramdisk-file'] = ramdisk
migrate_data.kernel_file = kernel
migrate_data.ramdisk_file = ramdisk
if block_migration:
if not migrate_data:
@ -2366,8 +2362,8 @@ class VMOps(object):
def post_live_migration(self, context, instance, migrate_data=None):
if migrate_data is not None:
vm_utils.destroy_kernel_ramdisk(self._session, instance,
migrate_data.get('kernel-file'),
migrate_data.get('ramdisk-file'))
migrate_data.kernel_file,
migrate_data.ramdisk_file)
def post_live_migration_at_destination(self, context, instance,
network_info, block_migration,