Allow block live migration of an instance with attached volumes

Since libvirt 1.2.17 it is possible to select which block devices
should be migrated to destination host. Block devices that are not
provided will not be migrated. This means that it is possible to
exclude volumes from block migration and therefore prevent volumes
from being copied onto themselves.

This patch implements a new check of the libvirt version. If the
version is greater than or equal to 1.2.17, it is possible to
block-live-migrate a VM with attached volumes.

Co-Authored-By: Bartosz Fic <bartosz.fic@intel.com>

Change-Id: I8fcc3ef3cb5d9fd3a95067929c496fdb5976fd41
Closes-Bug: #1398999
Partially implements: blueprint block-live-migrate-with-attached-volumes
This commit is contained in:
Pawel Koniszewski 2016-02-10 13:09:44 +01:00
parent f69ee4a8b1
commit 23fd0389f0
3 changed files with 280 additions and 63 deletions

View File

@ -592,6 +592,13 @@ class Domain(object):
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def migrateToURI3(self, dconnuri, params, logical_sum):
    """Fake of libvirt's migrateToURI3 that always fails.

    The fake driver cannot perform a real migration, so every call
    raises a libvirtError carrying an internal-error code from the
    QEMU domain, allowing tests to exercise the driver's failure path.
    """
    error = make_libvirtError(
        libvirtError,
        "Migration always fails for fake libvirt!",
        error_code=VIR_ERR_INTERNAL_ERROR,
        error_domain=VIR_FROM_QEMU)
    raise error
def migrateSetMaxDowntime(self, downtime):
    # Fake no-op: the fake domain accepts any max-downtime value
    # without recording it, since no real migration ever runs.
    pass

View File

@ -6574,6 +6574,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
@mock.patch.object(host.Host, 'has_min_version', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
@ -6583,15 +6584,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
'_is_shared_block_storage', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migrate_source_block_migration_with_bdm(
'_check_shared_storage_test_file', return_value=False)
def test_check_can_live_migrate_source_block_migration_with_bdm_error(
self, mock_check, mock_shared_block, mock_get_bdi,
mock_booted_from_volume, mock_has_local, mock_enough):
mock_booted_from_volume, mock_has_local, mock_enough,
mock_min_version):
mock_check.return_value = False
mock_shared_block.return_value = False
bdi = {'block_device_mapping': ['bdm']}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -6606,6 +6606,40 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.context, instance, dest_check_data,
block_device_info=bdi)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_has_local_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_is_booted_from_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            'get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_is_shared_block_storage', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_check_shared_storage_test_file', return_value=False)
def test_check_can_live_migrate_source_block_migration_with_bdm_success(
        self, mock_check, mock_shared_block, mock_get_bdi,
        mock_booted_from_volume, mock_has_local, mock_enough,
        mock_min_version):
    """Block live migration with mapped volumes passes the source check
    when libvirt is new enough for selective block device migration.

    has_min_version is mocked True (libvirt >= 1.2.17), so the presence
    of a block_device_mapping must NOT raise MigrationPreCheckError and
    the migrate_data object is returned unchanged.
    """
    # A non-empty mapping is what used to trigger the pre-check error
    # on older libvirt versions.
    bdi = {'block_device_mapping': ['bdm']}
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    migrate_data = objects.LibvirtLiveMigrateData(
        disk_over_commit=False,
        filename='file',
        disk_available_mb=100,
        image_type='default',
        block_migration=True)
    return_value = drvr.check_can_live_migrate_source(
        self.context, instance, migrate_data, block_device_info=bdi)
    self.assertEqual(migrate_data, return_value)
def _is_shared_block_storage_test_create_mocks(self, disks):
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
@ -6847,7 +6881,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock)
False, migrate_data, vdmock, [])
def test_live_migration_update_volume_xml(self):
self.compute = importutils.import_object(CONF.compute_manager)
@ -6896,7 +6930,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
test_mock.XMLDesc.return_value = target_xml
self.assertFalse(drvr._live_migration_operation(
self.context, instance_ref, 'dest', False,
migrate_data, test_mock))
migrate_data, test_mock, []))
mupdate.assert_called_once_with(target_xml, migrate_data.bdms,
{}, '')
@ -6940,7 +6974,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
test_mock.XMLDesc.return_value = target_xml
drvr._live_migration_operation(self.context, instance_ref,
'dest', False, migrate_data,
test_mock)
test_mock, [])
test_mock.migrateToURI2.assert_called_once_with(
'qemu+tcp://127.0.0.2/system',
None, mupdate(), None, None, 0)
@ -7112,7 +7146,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, dom)
False, migrate_data, dom, [])
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrate.assert_called_once_with(
@ -7134,7 +7168,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, dom)
False, migrate_data, dom, [])
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
create=True)
@ -7168,7 +7202,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock)
False, migrate_data, vdmock, [])
def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
self.compute = importutils.import_object(CONF.compute_manager)
@ -7198,7 +7232,42 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock)
False, migrate_data, vdmock, [])
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml',
            return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
def test_live_migration_uses_migrateToURI3(
        self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
        mock_min_version):
    """With libvirt >= 1.2.17 the driver must call migrateToURI3,
    passing the selective list of disks in the params dict.

    migrateToURI3 is mocked to raise so the test can both verify that
    the call happened with the expected arguments and that the libvirt
    error propagates out of _live_migration_operation.
    """
    # Preparing mocks
    disk_paths = ['vda', 'vdb']
    # Expected params dict handed to migrateToURI3; destination_xml is
    # '' because _update_xml is mocked to return an empty string.
    params = {
        'migrate_disks': ['vda', 'vdb'],
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'destination_xml': '',
    }
    mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

    # Start test
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='0.0.0.0',
        graphics_listen_addr_spice='0.0.0.0',
        serial_listen_addr='127.0.0.1',
        target_connect_addr=None,
        bdms=[])
    dom = fakelibvirt.virDomain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance, 'dest',
                      False, migrate_data, dom, disk_paths)
    mock_migrateToURI3.assert_called_once_with(
        CONF.libvirt.live_migration_uri % 'dest', params, None)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
create=True)
@ -7226,7 +7295,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock)
False, migrate_data, vdmock, [])
def test_live_migration_raises_exception(self):
# Confirms recover method is called when exceptions are raised.
@ -7271,7 +7340,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock)
False, migrate_data, vdmock, [])
self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
self.assertEqual(power_state.RUNNING, instance_ref.power_state)
@ -7323,7 +7392,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(test.TestingException,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock)
False, migrate_data, vdmock, [])
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@ -7373,8 +7442,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertFalse(mock_exist.called)
self.assertFalse(mock_shutil.called)
@mock.patch.object(host.Host, "has_min_version", return_value=False)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths(self, mock_xml):
def test_live_migration_copy_disk_paths(self, mock_xml, mock_version):
xml = """
<domain>
<name>dummy</name>
@ -7382,17 +7452,21 @@ class LibvirtConnTestCase(test.NoDBTestCase):
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
<target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<target dev="vdb"/>
<shareable/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<target dev="vdc"/>
<readonly/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
<target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
@ -7407,9 +7481,101 @@ class LibvirtConnTestCase(test.NoDBTestCase):
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
paths = drvr._live_migration_copy_disk_paths(guest)
self.assertEqual(["/var/lib/nova/instance/123/disk.root",
"/dev/mapper/somevol"], paths)
paths = drvr._live_migration_copy_disk_paths(None, None, guest)
self.assertEqual((["/var/lib/nova/instance/123/disk.root",
"/dev/mapper/somevol"], ['vda', 'vdd']), paths)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
@mock.patch('nova.virt.driver.get_block_device_info')
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_selective_block_migration(
        self, mock_xml, mock_get_instance,
        mock_block_device_info, mock_version):
    """_live_migration_copy_disk_paths must skip cinder-mapped volumes.

    With libvirt >= 1.2.17 (has_min_version mocked True), the helper
    consults the instance's block device mapping and excludes any
    guest disk whose target device matches a mapped volume (vdd here),
    as well as non file/block disks (the network disk), returning a
    (source paths, device names) tuple for the remaining local disks.
    """
    xml = """
    <domain>
        <name>dummy</name>
        <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
        <devices>
            <disk type="file">
                <source file="/var/lib/nova/instance/123/disk.root"/>
                <target dev="vda"/>
            </disk>
            <disk type="file">
                <source file="/var/lib/nova/instance/123/disk.shared"/>
                <target dev="vdb"/>
            </disk>
            <disk type="file">
                <source file="/var/lib/nova/instance/123/disk.config"/>
                <target dev="vdc"/>
            </disk>
            <disk type="block">
                <source dev="/dev/mapper/somevol"/>
                <target dev="vdd"/>
            </disk>
            <disk type="network">
                <source protocol="https" name="url_path">
                    <host name="hostname" port="443"/>
                </source>
            </disk>
        </devices>
    </domain>"""
    mock_xml.return_value = xml
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    # vdd is the only entry under 'block_device_mapping', i.e. the only
    # cinder volume; swap/ephemerals are local and must still be copied.
    block_device_info = {
        'swap': {
            'disk_bus': u'virtio',
            'swap_size': 10,
            'device_name': u'/dev/vdc'
        },
        'root_device_name': u'/dev/vda',
        'ephemerals': [{
            'guest_format': u'ext3',
            'device_name': u'/dev/vdb',
            'disk_bus': u'virtio',
            'device_type': u'disk',
            'size': 1
        }],
        'block_device_mapping': [{
            'guest_format': None,
            'boot_index': None,
            'mount_device': u'/dev/vdd',
            'connection_info': {
                u'driver_volume_type': u'iscsi',
                'serial': u'147df29f-aec2-4851-b3fe-f68dad151834',
                u'data': {
                    u'access_mode': u'rw',
                    u'target_discovered': False,
                    u'encrypted': False,
                    u'qos_specs': None,
                    u'target_iqn': u'iqn.2010-10.org.openstack:'
                                   u'volume-147df29f-aec2-4851-b3fe-'
                                   u'f68dad151834',
                    u'target_portal': u'10.102.44.141:3260', u'volume_id':
                        u'147df29f-aec2-4851-b3fe-f68dad151834',
                    u'target_lun': 1,
                    u'auth_password': u'cXELT66FngwzTwpf',
                    u'auth_username': u'QbQQjj445uWgeQkFKcVw',
                    u'auth_method': u'CHAP'
                }
            },
            'disk_bus': None,
            'device_type': None,
            'delete_on_termination': False
        }]
    }
    mock_block_device_info.return_value = block_device_info
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)
    # NOTE(review): `context` here appears to be the module-level
    # import rather than self.context — confirm that is intentional.
    return_value = drvr._live_migration_copy_disk_paths(context, instance,
                                                        guest)
    expected = (['/var/lib/nova/instance/123/disk.root',
                 '/var/lib/nova/instance/123/disk.shared',
                 '/var/lib/nova/instance/123/disk.config'],
                ['vda', 'vdb', 'vdc'])
    self.assertEqual(expected, return_value)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_live_migration_copy_disk_paths")
@ -7788,8 +7954,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
migrate_data = {}
disk_paths = ['/dev/vda', '/dev/vdb']
mock_copy_disk_path.return_value = disk_paths
disks_to_copy = (['/some/path/one', '/test/path/two'],
['vda', 'vdb'])
mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
@ -7802,7 +7969,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr._live_migration(self.context, instance, "fakehost",
fake_post, fake_recover, True,
migrate_data)
mock_copy_disk_path.assert_called_once_with(guest)
mock_copy_disk_path.assert_called_once_with(self.context, instance,
guest)
class AnyEventletEvent(object):
def __eq__(self, other):
@ -7811,11 +7979,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
migrate_data, dom)
migrate_data, dom, disks_to_copy[1])
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
migrate_data, dom, AnyEventletEvent(), disk_paths)
migrate_data, dom, AnyEventletEvent(), disks_to_copy[0])
def _do_test_create_images_and_backing(self, disk_type):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

View File

@ -425,6 +425,9 @@ MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit & rebase (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
# Libvirt version 1.2.17 is required for successful block live migration
# of a VM booted from an image with attached devices
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
# libvirt discard feature
MIN_LIBVIRT_DISCARD_VERSION = (1, 0, 6)
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
@ -5517,18 +5520,27 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
if block_device_info:
bdm = block_device_info.get('block_device_mapping')
# NOTE(stpierre): if this instance has mapped volumes,
# we can't do a block migration, since that will
# result in volumes being copied from themselves to
# themselves, which is a recipe for disaster.
if bdm and len(bdm):
LOG.error(_LE('Cannot block migrate instance %s with '
'mapped volumes'),
instance.uuid, instance=instance)
msg = (_('Cannot block migrate instance %s with mapped '
'volumes') % instance.uuid)
# NOTE(pkoniszewski): libvirt from version 1.2.17 upwards
# supports selective block device migration. It means that it
# is possible to define subset of block devices to be copied
# during migration. If they are not specified - block devices
# won't be migrated. However, it does not work when live
# migration is tunnelled through libvirt.
if bdm and not self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
# NOTE(stpierre): if this instance has mapped volumes,
# we can't do a block migration, since that will result
# in volumes being copied from themselves to themselves,
# which is a recipe for disaster.
ver = ".".join([str(x) for x in
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION])
msg = (_('Cannot block migrate instance %(uuid)s with'
' mapped volumes. Selective block device'
' migration feature requires libvirt version'
' %(libvirt_ver)s') %
{'uuid': instance.uuid, 'libvirt_ver': ver})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
elif not (dest_check_data.is_shared_block_storage or
dest_check_data.is_shared_instance_path or
(booted_from_volume and not has_local_disk)):
@ -5906,7 +5918,8 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.MigrationError(reason=msg)
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, dom):
block_migration, migrate_data, dom,
device_names):
"""Invoke the live migration operation
:param context: security context
@ -5917,6 +5930,8 @@ class LibvirtDriver(driver.ComputeDriver):
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
:param dom: the libvirt domain object
:param device_names: list of device names that are being migrated with
instance
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
@ -5961,12 +5976,25 @@ class LibvirtDriver(driver.ComputeDriver):
listen_addrs,
serial_listen_addr)
try:
dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
None,
new_xml_str,
migration_flags,
None,
CONF.libvirt.live_migration_bandwidth)
if self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
params = {
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': new_xml_str,
'migrate_disks': device_names,
}
dom.migrateToURI3(
CONF.libvirt.live_migration_uri % dest,
params,
migration_flags)
else:
dom.migrateToURI2(
CONF.libvirt.live_migration_uri % dest,
None,
new_xml_str,
migration_flags,
None,
CONF.libvirt.live_migration_bandwidth)
except libvirt.libvirtError as ex:
# NOTE(mriedem): There is a bug in older versions of
# libvirt where the VIR_DOMAIN_XML_MIGRATABLE flag causes
@ -6091,37 +6119,48 @@ class LibvirtDriver(driver.ComputeDriver):
for i in range(steps + 1):
yield (int(delay * i), int(offset + base ** i))
def _live_migration_copy_disk_paths(self, guest):
def _live_migration_copy_disk_paths(self, context, instance, guest):
'''Get list of disks to copy during migration
:param context: security context
:param instance: the instance being migrated
:param guest: the Guest instance being migrated
Get the list of disks to copy during migration.
:returns: a list of local disk paths to copy
:returns: a list of local source paths and a list of device names to
copy
'''
disks = []
disk_paths = []
device_names = []
block_devices = []
# TODO(pkoniszewski): Remove this if-statement when we bump min libvirt
# version to >= 1.2.17
if self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = driver.get_block_device_info(instance,
bdm_list)
block_device_mappings = driver.block_device_info_get_mapping(
block_device_info)
for bdm in block_device_mappings:
device_name = str(bdm['mount_device'].rsplit('/', 1)[1])
block_devices.append(device_name)
for dev in guest.get_all_disks():
# TODO(berrange) This is following the current
# (stupid) default logic in libvirt for selecting
# which disks are copied. In the future, when we
# can use a libvirt which accepts a list of disks
# to copy, we will need to adjust this to use a
# different rule.
#
# Our future goal is that a disk needs to be copied
# if it is a non-cinder volume which is not backed
# by shared storage. eg it may be an LVM block dev,
# or a raw/qcow2 file on a local filesystem. We
# never want to copy disks on NFS, or RBD or any
# cinder volume
if dev.readonly or dev.shareable:
continue
if dev.source_type not in ["file", "block"]:
continue
disks.append(dev.source_path)
return disks
if dev.target_dev in block_devices:
continue
disk_paths.append(dev.source_path)
device_names.append(dev.target_dev)
return (disk_paths, device_names)
def _live_migration_data_gb(self, instance, disk_paths):
'''Calculate total amount of data to be transferred
@ -6379,8 +6418,10 @@ class LibvirtDriver(driver.ComputeDriver):
guest = self._host.get_guest(instance)
disk_paths = []
device_names = []
if block_migration:
disk_paths = self._live_migration_copy_disk_paths(guest)
disk_paths, device_names = self._live_migration_copy_disk_paths(
context, instance, guest)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
@ -6390,7 +6431,8 @@ class LibvirtDriver(driver.ComputeDriver):
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, dom)
migrate_data, dom,
device_names)
finish_event = eventlet.event.Event()