libvirt: Prevent block live migration with tunnelled flag

libvirt will report "Selecting disks to migrate is not
implemented for tunneled migration" while doing block migration
with VIR_MIGRATE_TUNNELLED flag.

This patch does 2 changes:

1. Raise exception.MigrationPreCheckError for block live migration
   with mapped volumes and the tunnelled flag on.
2. Remove migrate_disks from the params of migrateToURI3 in the case
   of tunnelled block live migration without mapped volumes, since we
   want to copy all disks to the destination.

Co-Authored-By: Pawel Koniszewski <pawel.koniszewski@intel.com>
Closes-bug: #1576093

Conflicts:
	nova/tests/unit/virt/libvirt/test_driver.py
	nova/virt/libvirt/driver.py

Change-Id: Id6e49f298133c53d21386ea619c83e413ef3117a
(cherry picked from commit 1885a39083)
This commit is contained in:
Eli Qiao 2016-06-13 10:58:29 +02:00 committed by Eli Qiao
parent c8ec9ebf37
commit 4bc51937a2
2 changed files with 148 additions and 5 deletions

View File

@ -6564,6 +6564,41 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.context, instance, dest_check_data,
block_device_info=bdi)
# Regression test for bug #1576093: libvirt does not support selective
# disk migration with the VIR_MIGRATE_TUNNELLED flag, so the source-side
# pre-check must reject tunnelled block live migration of an instance
# that has mapped volumes.
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_has_local_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_booted_from_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file', return_value=False)
def test_check_can_live_migrate_source_bm_with_bdm_tunnelled_error(
self, mock_check, mock_shared_block, mock_get_bdi,
mock_booted_from_volume, mock_has_local, mock_enough,
mock_min_version):
"""check_can_live_migrate_source() raises MigrationPreCheckError when
block migration is requested for an instance with mapped volumes
while live_migration_tunnelled is enabled.
"""
# Enable tunnelling so that _parse_migration_flags() below includes
# VIR_MIGRATE_TUNNELLED in the computed block-migration flags.
self.flags(live_migration_tunnelled=True,
group='libvirt')
# A non-empty block_device_mapping marks the instance as having
# mapped volumes — the case the pre-check must reject.
bdi = {'block_device_mapping': ['bdm']}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# block_migration=True requests a block live migration; the shared
# storage checks are mocked out above to force the block path.
dest_check_data = objects.LibvirtLiveMigrateData(
filename='file',
image_type='default',
block_migration=True,
disk_over_commit=False,
disk_available_mb=100)
# Recompute migration flags so the tunnelled setting from the
# config above is actually picked up by the driver.
drvr._parse_migration_flags()
self.assertRaises(exception.MigrationPreCheckError,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data,
block_device_info=bdi)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@ -7291,6 +7326,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_live_migration_uses_migrateToURI3(
self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
mock_min_version):
self.flags(live_migration_tunnelled=False, group='libvirt')
# Preparing mocks
disk_paths = ['vda', 'vdb']
params = {
@ -7311,13 +7347,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
dom = fakelibvirt.virDomain
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
instance = objects.Instance(**self.test_instance)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance, 'dest',
False, migrate_data, dom, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri('dest'), params, None)
drvr._live_migration_uri('dest'), params, 19)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
create=True)
@ -7348,6 +7385,39 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.context, instance_ref, 'dest',
False, migrate_data, vdmock, [])
# Companion test for bug #1576093: when tunnelling is on and the instance
# has NO mapped volumes, tunnelled block live migration must still work,
# with 'migrate_disks' removed from the migrateToURI3 params so that all
# disks are copied to the destination.
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml',
return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
def test_block_live_migration_tunnelled_migrateToURI3(
self, mock_old_xml, mock_new_xml,
mock_migrateToURI3, mock_min_version):
"""Tunnelled block live migration without mapped volumes calls
migrateToURI3 with no 'migrate_disks' entry in params.
"""
self.flags(live_migration_tunnelled=True, group='libvirt')
# Preparing mocks
disk_paths = []
# Expected params deliberately contain no 'migrate_disks' key: the
# driver must pop it before invoking dom.migrateToURI3().
params = {
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '',
}
# Start test
# bdms=[] means no mapped volumes, so the pre-check does not block
# this migration.
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='0.0.0.0',
serial_listen_addr='127.0.0.1',
target_connect_addr=None,
bdms=[],
block_migration=True)
dom = fakelibvirt.virDomain
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Pick up the tunnelled flag set via self.flags() above.
drvr._parse_migration_flags()
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance, 'dest',
True, migrate_data, dom, disk_paths)
# 151 is presumably the numeric value of the combined block-migration
# flags including VIR_MIGRATE_TUNNELLED — TODO confirm against
# _parse_migration_flags().
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri('dest'), params, 151)
def test_live_migration_raises_exception(self):
# Confirms recover method is called when exceptions are raised.
# Preparing data
@ -7501,6 +7571,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch.object(host.Host, "has_min_version", return_value=False)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths(self, mock_xml, mock_version):
self.flags(live_migration_tunnelled=False, group='libvirt')
xml = """
<domain>
<name>dummy</name>
@ -7534,6 +7605,51 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_xml.return_value = xml
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
paths = drvr._live_migration_copy_disk_paths(None, None, guest)
self.assertEqual((["/var/lib/nova/instance/123/disk.root",
"/dev/mapper/somevol"], ['vda', 'vdd']), paths)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_tunnelled(self, mock_xml):
self.flags(live_migration_tunnelled=True, group='libvirt')
xml = """
<domain>
<name>dummy</name>
<uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
<target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<target dev="vdb"/>
<shareable/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<target dev="vdc"/>
<readonly/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
<target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
<host name="hostname" port="443"/>
</source>
</disk>
</devices>
</domain>"""
mock_xml.return_value = xml
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
@ -7548,6 +7664,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_live_migration_copy_disk_paths_selective_block_migration(
self, mock_xml, mock_get_instance,
mock_block_device_info, mock_version):
self.flags(live_migration_tunnelled=False, group='libvirt')
xml = """
<domain>
<name>dummy</name>
@ -7623,6 +7740,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
}
mock_block_device_info.return_value = block_device_info
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
return_value = drvr._live_migration_copy_disk_paths(context, instance,

View File

@ -5570,6 +5570,17 @@ class LibvirtDriver(driver.ComputeDriver):
{'uuid': instance.uuid, 'libvirt_ver': ver})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
# NOTE(eliqiao): Selective disk migrations are not supported
# with tunnelled block migrations so we can block them early.
if (bdm and
(self._block_migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED != 0)):
msg = (_('Cannot block migrate instance %(uuid)s with'
' mapped volumes. Selective block device'
' migration is not supported with tunnelled'
' block migrations.') % {'uuid': instance.uuid})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
elif not (dest_check_data.is_shared_block_storage or
dest_check_data.is_shared_instance_path or
(booted_from_volume and not has_local_disk)):
@ -6036,6 +6047,18 @@ class LibvirtDriver(driver.ComputeDriver):
'destination_xml': new_xml_str,
'migrate_disks': device_names,
}
# NOTE(pkoniszewski): Because of precheck which blocks
# tunnelled block live migration with mapped volumes we
# can safely remove migrate_disks when tunnelling is
# on. Otherwise we will block all tunnelled block
# migrations, even when an instance does not have
# volumes mapped. This is because selective disk
# migration is not supported in tunnelled block live
# migration. Also we cannot fallback to migrateToURI2
# in this case because of bug #1398999
if (migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED != 0):
params.pop('migrate_disks')
dom.migrateToURI3(
self._live_migration_uri(dest),
params,
@ -6189,10 +6212,12 @@ class LibvirtDriver(driver.ComputeDriver):
device_names = []
block_devices = []
# TODO(pkoniszewski): Remove this if-statement when we bump min libvirt
# version to >= 1.2.17
if self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
# TODO(pkoniszewski): Remove version check when we bump min libvirt
# version to >= 1.2.17.
if (self._block_migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED == 0 and
self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION)):
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = driver.get_block_device_info(instance,