Merge "hyperv: Cleans up live migration Planned VM" into stable/rocky

Zuul 2018-10-16 10:29:56 +00:00, committed by Gerrit Code Review
commit 8ba17c5209
6 changed files with 58 additions and 18 deletions

nova/compute/manager.py

@@ -6529,23 +6529,22 @@ class ComputeManager(manager.Manager):
         # migrate_data objects for drivers that expose block live migration
         # information (i.e. Libvirt, Xenapi and HyperV). For other drivers
         # cleanup is not needed.
-        is_shared_block_storage = True
-        is_shared_instance_path = True
+        do_cleanup = False
+        destroy_disks = False
         if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
-            is_shared_block_storage = migrate_data.is_shared_block_storage
-            is_shared_instance_path = migrate_data.is_shared_instance_path
+            # No instance booting at source host, but instance dir
+            # must be deleted for preparing next block migration
+            # must be deleted for preparing next live migration w/o shared
+            # storage
+            do_cleanup = not migrate_data.is_shared_instance_path
+            destroy_disks = not migrate_data.is_shared_block_storage
         elif isinstance(migrate_data, migrate_data_obj.XenapiLiveMigrateData):
-            is_shared_block_storage = not migrate_data.block_migration
-            is_shared_instance_path = not migrate_data.block_migration
+            do_cleanup = migrate_data.block_migration
+            destroy_disks = migrate_data.block_migration
         elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
-            is_shared_instance_path = migrate_data.is_shared_instance_path
-            is_shared_block_storage = migrate_data.is_shared_instance_path
-
-        # No instance booting at source host, but instance dir
-        # must be deleted for preparing next block migration
-        # must be deleted for preparing next live migration w/o shared storage
-        do_cleanup = not is_shared_instance_path
-        destroy_disks = not is_shared_block_storage
+            # NOTE(claudiub): We need to cleanup any zombie Planned VM.
+            do_cleanup = True
+            destroy_disks = not migrate_data.is_shared_instance_path

         return (do_cleanup, destroy_disks)
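
In net effect, the rewritten helper is a per-driver decision table. A minimal runnable sketch of the same logic (the driver-name strings and plain boolean parameters are illustrative stand-ins for the migrate_data object):

def cleanup_flags(driver, is_shared_instance_path=True,
                  is_shared_block_storage=True, block_migration=False):
    # Returns (do_cleanup, destroy_disks) for the source host.
    do_cleanup = False
    destroy_disks = False
    if driver == 'libvirt':
        # The instance dir must go so the next live migration without
        # shared storage can recreate it.
        do_cleanup = not is_shared_instance_path
        destroy_disks = not is_shared_block_storage
    elif driver == 'xenapi':
        do_cleanup = block_migration
        destroy_disks = block_migration
    elif driver == 'hyperv':
        # Always clean up, so a zombie Planned VM cannot linger.
        do_cleanup = True
        destroy_disks = not is_shared_instance_path
    return do_cleanup, destroy_disks

assert cleanup_flags('hyperv') == (True, False)
assert cleanup_flags('hyperv', is_shared_instance_path=False) == (True, True)
assert cleanup_flags('unknown') == (False, False)

The asserts capture the behavioral change: for Hyper-V, do_cleanup is now unconditionally True, so a leftover Planned VM is removed even when the instance path is shared.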

nova/tests/unit/compute/test_compute_mgr.py

@@ -8359,6 +8359,13 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
+    def test_live_migration_cleanup_flags_shared_hyperv(self):
+        migrate_data = objects.HyperVLiveMigrateData(
+            is_shared_instance_path=True)
+        do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
+            migrate_data)
+        self.assertTrue(do_cleanup)
+        self.assertFalse(destroy_disks)
+
     def test_live_migration_cleanup_flags_other(self):
         migrate_data = mock.Mock()
         do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
             migrate_data)
         self.assertFalse(do_cleanup)
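
The new test pins down the shared-storage case. A hypothetical companion case (not part of this diff; the test name is illustrative) would pin down the non-shared path, where disks must also be destroyed:

def test_live_migration_cleanup_flags_block_migrate_hyperv(self):
    migrate_data = objects.HyperVLiveMigrateData(
        is_shared_instance_path=False)
    do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
        migrate_data)
    self.assertTrue(do_cleanup)
    self.assertTrue(destroy_disks)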

nova/tests/unit/virt/hyperv/test_vmops.py

@@ -70,6 +70,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
         self._vmops._vhdutils = mock.MagicMock()
         self._vmops._pathutils = mock.MagicMock()
         self._vmops._hostutils = mock.MagicMock()
+        self._vmops._migrutils = mock.MagicMock()
         self._vmops._serial_console_ops = mock.MagicMock()
         self._vmops._block_dev_man = mock.MagicMock()
         self._vmops._vif_driver = mock.MagicMock()
@@ -1092,29 +1093,42 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
         self._vmops._pathutils.get_instance_dir.assert_called_once_with(
             mock_instance.name, create_dir=False, remove_dir=True)

-    @ddt.data(True, False)
+    @ddt.data({},
+              {'vm_exists': True},
+              {'planned_vm_exists': True})
+    @ddt.unpack
     @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
     @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
     @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
     @mock.patch('nova.virt.hyperv.vmops.VMOps.unplug_vifs')
-    def test_destroy(self, vm_exists, mock_unplug_vifs, mock_power_off,
-                     mock_delete_disk_files, mock_disconnect_volumes):
+    def test_destroy(self, mock_unplug_vifs, mock_power_off,
+                     mock_delete_disk_files, mock_disconnect_volumes,
+                     vm_exists=False, planned_vm_exists=False):
         mock_instance = fake_instance.fake_instance_obj(self.context)
         self._vmops._vmutils.vm_exists.return_value = vm_exists
+        self._vmops._migrutils.planned_vm_exists.return_value = (
+            planned_vm_exists)

         self._vmops.destroy(instance=mock_instance,
                             block_device_info=mock.sentinel.FAKE_BD_INFO,
                             network_info=mock.sentinel.fake_network_info)

+        mock_destroy_planned_vms = (
+            self._vmops._migrutils.destroy_existing_planned_vm)
         if vm_exists:
             self._vmops._vmutils.stop_vm_jobs.assert_called_once_with(
                 mock_instance.name)
             mock_power_off.assert_called_once_with(mock_instance)
             self._vmops._vmutils.destroy_vm.assert_called_once_with(
                 mock_instance.name)
+        elif planned_vm_exists:
+            self._vmops._migrutils.planned_vm_exists.assert_called_once_with(
+                mock_instance.name)
+            mock_destroy_planned_vms.assert_called_once_with(
+                mock_instance.name)
         else:
             self.assertFalse(mock_power_off.called)
             self.assertFalse(self._vmops._vmutils.destroy_vm.called)
+            self.assertFalse(mock_destroy_planned_vms.called)

         self._vmops._vmutils.vm_exists.assert_called_with(
             mock_instance.name)
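
The @ddt.data/@ddt.unpack pairing above generates one test method per dict and unpacks each dict into keyword arguments, so keys missing from a data set fall back to the signature defaults (vm_exists=False, planned_vm_exists=False). A self-contained sketch of the mechanism (class and test names are illustrative; requires the ddt package):

import unittest

import ddt


@ddt.ddt
class ExampleTestCase(unittest.TestCase):
    @ddt.data({},
              {'flag': True})
    @ddt.unpack
    def test_example(self, flag=False):
        # Two tests are generated: one with the default flag=False,
        # one with flag=True from the second data set.
        self.assertIsInstance(flag, bool)


if __name__ == '__main__':
    unittest.main()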

nova/tests/unit/virt/hyperv/test_volumeops.py

@@ -304,8 +304,10 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
         self._base_vol_driver._diskutils = mock.Mock()
         self._base_vol_driver._vmutils = mock.Mock()
+        self._base_vol_driver._migrutils = mock.Mock()
         self._base_vol_driver._conn = mock.Mock()

         self._vmutils = self._base_vol_driver._vmutils
+        self._migrutils = self._base_vol_driver._migrutils
         self._diskutils = self._base_vol_driver._diskutils
         self._conn = self._base_vol_driver._conn
@@ -452,9 +454,15 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
     def test_attach_volume_block_dev(self):
         self._test_attach_volume(is_block_dev=True)

+    def test_detach_volume_planned_vm(self):
+        self._base_vol_driver.detach_volume(mock.sentinel.connection_info,
+                                            mock.sentinel.inst_name)
+        self._vmutils.detach_vm_disk.assert_not_called()
+
     @mock.patch.object(volumeops.BaseVolumeDriver,
                        'get_disk_resource_path')
     def test_detach_volume(self, mock_get_disk_resource_path):
+        self._migrutils.planned_vm_exists.return_value = False
         connection_info = get_fake_connection_info()
         self._base_vol_driver.detach_volume(connection_info,
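
test_detach_volume_planned_vm needs no explicit stubbing because calling an attribute on a Mock returns another Mock, which is truthy, so the planned-VM guard branch is taken by default; test_detach_volume has to force the return value to False to get past it. A quick illustration:

from unittest import mock

migrutils = mock.Mock()
# The default return value is itself a Mock, and Mocks are truthy:
assert bool(migrutils.planned_vm_exists('fake-instance'))

migrutils.planned_vm_exists.return_value = False
assert not migrutils.planned_vm_exists('fake-instance')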

nova/virt/hyperv/vmops.py

@@ -94,6 +94,7 @@ class VMOps(object):
         self._metricsutils = utilsfactory.get_metricsutils()
         self._vhdutils = utilsfactory.get_vhdutils()
         self._hostutils = utilsfactory.get_hostutils()
+        self._migrutils = utilsfactory.get_migrationutils()
         self._pathutils = pathutils.PathUtils()
         self._volumeops = volumeops.VolumeOps()
         self._imagecache = imagecache.ImageCache()
@@ -738,9 +739,14 @@ class VMOps(object):
             self._vmutils.stop_vm_jobs(instance_name)
             self.power_off(instance)
             self._vmutils.destroy_vm(instance_name)
+        elif self._migrutils.planned_vm_exists(instance_name):
+            self._migrutils.destroy_existing_planned_vm(instance_name)
         else:
             LOG.debug("Instance not found", instance=instance)

+        # NOTE(claudiub): The vifs should be unplugged and the volumes
+        # should be disconnected even if the VM doesn't exist anymore,
+        # so they are not leaked.
         self.unplug_vifs(instance, network_info)
         self._volumeops.disconnect_volumes(block_device_info)
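
Pieced together from the context lines above, destroy() now distinguishes three cases: a real VM, a Planned VM, and no VM at all. A sketch of the resulting flow, based only on what this hunk shows (the real method takes additional arguments):

def destroy(self, instance, network_info, block_device_info):
    instance_name = instance.name
    if self._vmutils.vm_exists(instance_name):
        self._vmutils.stop_vm_jobs(instance_name)
        self.power_off(instance)
        self._vmutils.destroy_vm(instance_name)
    elif self._migrutils.planned_vm_exists(instance_name):
        # Remove the zombie Planned VM a failed live migration
        # left behind on this host.
        self._migrutils.destroy_existing_planned_vm(instance_name)
    else:
        LOG.debug("Instance not found", instance=instance)
    # Unplug vifs and disconnect volumes unconditionally, so nothing
    # is leaked when the VM is already gone.
    self.unplug_vifs(instance, network_info)
    self._volumeops.disconnect_volumes(block_device_info)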

nova/virt/hyperv/volumeops.py

@@ -210,6 +210,7 @@ class BaseVolumeDriver(object):
         self._conn = None
         self._diskutils = utilsfactory.get_diskutils()
         self._vmutils = utilsfactory.get_vmutils()
+        self._migrutils = utilsfactory.get_migrationutils()

     @property
     def _connector(self):
@@ -277,6 +278,11 @@ class BaseVolumeDriver(object):
                                              slot)

     def detach_volume(self, connection_info, instance_name):
+        if self._migrutils.planned_vm_exists(instance_name):
+            LOG.warning("Instance %s is a Planned VM, cannot detach "
+                        "volumes from it.", instance_name)
+            return
+
         disk_path = self.get_disk_resource_path(connection_info)
         LOG.debug("Detaching disk %(disk_path)s "