delete allocation of evacuated instance

After an evacuation the instance has allocations on both the source and
the destination computes. This is acceptable while the source compute is
down. However, once the source compute is brought back up, the
allocation from the source host needs to be cleaned up.

Closes-Bug: #1709902
Change-Id: I0df401a7c91f012fdb25cb0e6b344ca51de8c309
This commit is contained in:
Balazs Gibizer 2017-08-11 11:40:35 +02:00
parent fefff942ef
commit 9b9c2c52f3
6 changed files with 57 additions and 19 deletions

View File

@ -668,6 +668,11 @@ class ComputeManager(manager.Manager):
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
rt = self._get_resource_tracker()
rt.delete_allocation_for_evacuated_instance(
instance, migration.source_node)
migration.status = 'completed'
migration.save()

View File

@ -1210,6 +1210,21 @@ class ResourceTracker(object):
"host that might need to be removed: %s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, instance, node):
    """Drop the instance's allocation against *node* in placement.

    Called after an evacuation so the (recovered) source compute node
    stops holding resources for an instance that now runs elsewhere.
    Logs an error if placement refuses the removal.
    """
    # Amount of resources the instance consumes, derived from its flavor.
    consumed = scheduler_utils.resources_from_flavor(
        instance, instance.flavor)
    node_uuid = self.compute_nodes[node].uuid
    removed = self.reportclient.remove_provider_from_instance_allocation(
        instance.uuid, node_uuid, instance.user_id,
        instance.project_id, consumed)
    if not removed:
        LOG.error("Failed to clean allocation of an evacuated "
                  "instance on the source node %s",
                  node_uuid, instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine

View File

@ -1641,27 +1641,16 @@ class ServerMovingTests(test.TestCase, integrated_helpers.InstanceHelperMixin):
source_compute_id, {'forced_down': 'false'})
source_usages = self._get_provider_usages(source_rp_uuid)
# NOTE(gibi): this is bug 1709902 as the source compute does not clean
# up the allocation during init_host
self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
# after fixing bug 1709902 the following is expected
# self.assertEqual({'VCPU': 0,
# 'MEMORY_MB': 0,
# 'DISK_GB': 0},
# source_usages)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0},
source_usages)
dest_usages = self._get_provider_usages(dest_rp_uuid)
self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
allocations = self._get_allocations_by_server_uuid(server['id'])
# NOTE(gibi): this is bug 1709902 as the source compute does not clean
# up the allocation during init_host
self.assertEqual(2, len(allocations))
source_allocation = allocations[source_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
# after fixing bug 1709902 the following is expected
# self.assertEqual(1, len(allocations))
self.assertEqual(1, len(allocations))
dest_allocation = allocations[dest_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)

View File

@ -7091,6 +7091,7 @@ class ComputeTestCase(BaseTestCase,
{'host': 'otherhost'})
migration = objects.Migration(instance_uuid=evacuated_instance.uuid)
migration.source_node = NODENAME
mock_get_filter.return_value = [migration]
instances.append(evacuated_instance)
mock_get_inst.return_value = instances
@ -7148,6 +7149,7 @@ class ComputeTestCase(BaseTestCase,
{'host': 'otherhost'})
migration = objects.Migration(instance_uuid=evacuated_instance.uuid)
migration.source_node = NODENAME
mock_get_filter.return_value = [migration]
instances.append(evacuated_instance)
mock_get_drv.return_value = instances
@ -7207,6 +7209,7 @@ class ComputeTestCase(BaseTestCase,
{'host': 'otherhost'})
migration = objects.Migration(instance_uuid=evacuated_instance.uuid)
migration.source_node = NODENAME
mock_get_filter.return_value = [migration]
instances.append(evacuated_instance)
mock_get_inst.return_value = instances

View File

@ -652,6 +652,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute.init_virt_events()
self.assertFalse(mock_register.called)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'delete_allocation_for_evacuated_instance')
@mock.patch.object(manager.ComputeManager, '_get_instances_on_driver')
@mock.patch.object(manager.ComputeManager, 'init_virt_events')
@mock.patch.object(context, 'get_admin_context')
@ -663,13 +665,14 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
@mock.patch('nova.objects.Migration.save')
def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get,
mock_temp_mut, mock_init_host, mock_destroy, mock_host_get,
mock_admin_ctxt, mock_init_virt, mock_get_inst):
mock_admin_ctxt, mock_init_virt, mock_get_inst, mock_delete_alloc):
our_host = self.compute.host
not_our_host = 'not-' + our_host
deleted_instance = fake_instance.fake_instance_obj(
self.context, host=not_our_host, uuid=uuids.deleted_instance)
migration = objects.Migration(instance_uuid=deleted_instance.uuid)
migration.source_node = 'fake-node'
mock_mig_get.return_value = [migration]
mock_admin_ctxt.return_value = self.context
mock_host_get.return_value = objects.InstanceList()
@ -695,6 +698,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_destroy.assert_called_once_with(self.context, deleted_instance,
mock.ANY, mock.ANY, mock.ANY)
mock_save.assert_called_once_with()
mock_delete_alloc.assert_called_once_with(
deleted_instance, migration.source_node)
def test_init_instance_with_binding_failed_vif_type(self):
# this instance will plug a 'binding_failed' vif
@ -3300,6 +3305,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
migration = objects.Migration(instance_uuid=instance_2.uuid)
# Consider the migration successful
migration.status = 'done'
migration.source_node = 'fake-node'
with test.nested(
mock.patch.object(self.compute, '_get_instances_on_driver',
@ -3313,16 +3319,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
return_value=False),
mock.patch.object(self.compute.driver, 'destroy'),
mock.patch('nova.objects.MigrationList.get_by_filters'),
mock.patch('nova.objects.Migration.save')
mock.patch('nova.objects.Migration.save'),
mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'delete_allocation_for_evacuated_instance')
) as (_get_instances_on_driver, get_instance_nw_info,
_get_instance_block_device_info, _is_instance_storage_shared,
destroy, migration_list, migration_save):
destroy, migration_list, migration_save, remove_allocation):
migration_list.return_value = [migration]
self.compute._destroy_evacuated_instances(self.context)
# Only instance 2 should be deleted. Instance 1 is still running
# here, but no migration from our host exists, so ignore it
destroy.assert_called_once_with(self.context, instance_2, None,
{}, True)
remove_allocation.assert_called_once_with(
instance_2, migration.source_node)
@mock.patch('nova.compute.manager.ComputeManager.'
'_destroy_evacuated_instances')

View File

@ -2561,6 +2561,22 @@ class TestUpdateUsageFromInstance(BaseTestCase):
self.assertEqual(-1024, cn.free_ram_mb)
self.assertEqual(-1, cn.free_disk_gb)
@mock.patch('nova.scheduler.utils.resources_from_flavor')
def test_delete_allocation_for_evacuated_instance(
        self, mock_resources_from_flavor):
    """Cleanup after evacuation removes the node from the allocation."""
    fake_resources = mock.Mock()
    mock_resources_from_flavor.return_value = fake_resources
    inst = _INSTANCE_FIXTURES[0].obj_clone()
    inst.uuid = uuids.inst0

    self.rt.delete_allocation_for_evacuated_instance(inst, _NODENAME)

    # The report client must have been asked to strip exactly this
    # node's provider from the instance's allocation.
    remove_alloc = (
        self.rt.reportclient.remove_provider_from_instance_allocation)
    remove_alloc.assert_called_once_with(
        inst.uuid, self.rt.compute_nodes[_NODENAME].uuid,
        inst.user_id, inst.project_id, fake_resources)
class TestInstanceInResizeState(test.NoDBTestCase):
def test_active_suspending(self):