Always use read_deleted='yes' on lazy-load

For some reason we were only reading deleted instances when loading generic
fields and not things like flavor. That weird behavior isn't very helpful,
so this makes us always read deleted for that case. Some of the fields, like
tags, will short-circuit that and just immediately lazy-load an empty set.
But for anything else, we should allow reading that data if it's still there.

With this change, we are able to remove a specific read_deleted='yes' usage
from ComputeManager._destroy_evacuated_instances() which is handled with
the generic solution. TestEvacuateDeleteServerRestartOriginalCompute asserts
that the evacuate scenario is still fixed.

Related-Bug: #1794996
Related-Bug: #1745977

Change-Id: I8ec3a3a697e55941ee447d0b52d29785717e4bf0
This commit is contained in:
Dan Smith 2018-06-13 11:14:37 -07:00 committed by Matt Riedemann
parent 92dbeae1d4
commit 604819b29c
5 changed files with 49 additions and 33 deletions

View File

@ -663,14 +663,9 @@ class ComputeManager(manager.Manager):
return
evacuations = {mig.instance_uuid: mig for mig in evacuations}
# The instances might be deleted in which case we need to avoid
# InstanceNotFound being raised from lazy-loading fields on the
# instances while cleaning up this host.
read_deleted_context = context.elevated(read_deleted='yes')
# TODO(mriedem): We could optimize by pre-loading the joined fields
# we know we'll use, like info_cache and flavor. We can also replace
# this with a generic solution: https://review.openstack.org/575190/
local_instances = self._get_instances_on_driver(read_deleted_context)
# we know we'll use, like info_cache and flavor.
local_instances = self._get_instances_on_driver(context)
evacuated = [inst for inst in local_instances
if inst.uuid in evacuations]

View File

@ -899,10 +899,9 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
self.obj_reset_changes()
def _load_generic(self, attrname):
with utils.temporary_mutation(self._context, read_deleted='yes'):
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
if attrname not in instance:
# NOTE(danms): Never allow us to recursively-load
@ -1111,6 +1110,24 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
'uuid': self.uuid,
})
with utils.temporary_mutation(self._context, read_deleted='yes'):
self._obj_load_attr(attrname)
def _obj_load_attr(self, attrname):
"""Internal method for loading attributes from instances.
NOTE: Do not use this directly.
This method contains the implementation of lazy-loading attributes
from Instance object, minus some massaging of the context and
error-checking. This should always be called with the object-local
context set for reading deleted instances and with uuid set. All
of the code below depends on those two things. Thus, this should
only be called from obj_load_attr() itself.
:param attrname: The name of the attribute to be loaded
"""
# NOTE(danms): We handle some fields differently here so that we
# can be more efficient
if attrname == 'fault':

View File

@ -7465,7 +7465,6 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj({'host': 'someotherhost'})
self.compute._instance_update(self.context, instance, vcpus=4)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@mock.patch.object(compute_manager.ComputeManager,
@ -7479,10 +7478,8 @@ class ComputeTestCase(BaseTestCase,
@mock.patch('nova.objects.Migration.save')
def test_destroy_evacuated_instance_on_shared_storage(self, mock_save,
mock_get_filter, mock_destroy, mock_is_inst, mock_get_blk,
mock_get_inst, mock_remove_allocs, mock_elevated):
mock_get_inst, mock_remove_allocs):
fake_context = context.get_admin_context()
read_deleted_context = fake_context.elevated(read_only='yes')
mock_elevated.return_value = read_deleted_context
# instances in central db
instances = [
@ -7519,7 +7516,7 @@ class ComputeTestCase(BaseTestCase,
'pre-migrating',
'done'],
'migration_type': 'evacuation'})
mock_get_inst.assert_called_once_with(read_deleted_context)
mock_get_inst.assert_called_once_with(fake_context)
mock_get_nw.assert_called_once_with(fake_context, evacuated_instance)
mock_get_blk.assert_called_once_with(fake_context, evacuated_instance)
mock_is_inst.assert_called_once_with(fake_context, evacuated_instance)
@ -7530,7 +7527,6 @@ class ComputeTestCase(BaseTestCase,
fake_context, evacuated_instance.uuid,
self.rt.compute_nodes[NODENAME].uuid)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@mock.patch.object(compute_manager.ComputeManager,
@ -7548,11 +7544,8 @@ class ComputeTestCase(BaseTestCase,
@mock.patch('nova.objects.Migration.save')
def test_destroy_evacuated_instance_with_disks(self, mock_save,
mock_get_filter, mock_destroy, mock_check_clean, mock_check,
mock_check_local, mock_get_blk, mock_get_drv, mock_remove_allocs,
mock_elevated):
mock_check_local, mock_get_blk, mock_get_drv, mock_remove_allocs):
fake_context = context.get_admin_context()
read_deleted_context = fake_context.elevated(read_deleted='yes')
mock_elevated.return_value = read_deleted_context
# instances in central db
instances = [
@ -7583,7 +7576,7 @@ class ComputeTestCase(BaseTestCase,
return_value='fake_network_info') as mock_get_nw:
self.compute._destroy_evacuated_instances(fake_context)
mock_get_drv.assert_called_once_with(read_deleted_context)
mock_get_drv.assert_called_once_with(fake_context)
mock_get_nw.assert_called_once_with(fake_context, evacuated_instance)
mock_get_blk.assert_called_once_with(fake_context, evacuated_instance)
mock_check_local.assert_called_once_with(fake_context,
@ -7600,7 +7593,6 @@ class ComputeTestCase(BaseTestCase,
fake_context, evacuated_instance.uuid,
self.rt.compute_nodes[NODENAME].uuid)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@mock.patch.object(compute_manager.ComputeManager,
@ -7617,11 +7609,8 @@ class ComputeTestCase(BaseTestCase,
@mock.patch('nova.objects.Migration.save')
def test_destroy_evacuated_instance_not_implemented(self, mock_save,
mock_get_filter, mock_destroy, mock_check_clean, mock_check,
mock_check_local, mock_get_blk, mock_get_inst, mock_remove_allocs,
mock_elevated):
mock_check_local, mock_get_blk, mock_get_inst, mock_remove_allocs):
fake_context = context.get_admin_context()
read_deleted_context = fake_context.elevated(read_deleted='yes')
mock_elevated.return_value = read_deleted_context
# instances in central db
instances = [
@ -7651,7 +7640,7 @@ class ComputeTestCase(BaseTestCase,
return_value='fake_network_info') as mock_get_nw:
self.compute._destroy_evacuated_instances(fake_context)
mock_get_inst.assert_called_once_with(read_deleted_context)
mock_get_inst.assert_called_once_with(fake_context)
mock_get_nw.assert_called_once_with(fake_context, evacuated_instance)
mock_get_blk.assert_called_once_with(fake_context, evacuated_instance)
mock_check_local.assert_called_once_with(fake_context,

View File

@ -848,7 +848,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute.init_virt_events()
self.assertFalse(mock_register.called)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.scheduler.utils.resources_from_flavor')
@mock.patch.object(manager.ComputeManager, '_get_instances_on_driver')
@ -863,12 +862,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get,
mock_temp_mut, mock_init_host, mock_destroy, mock_host_get,
mock_admin_ctxt, mock_init_virt, mock_get_inst, mock_resources,
mock_get_node, mock_elevated):
mock_get_node):
our_host = self.compute.host
not_our_host = 'not-' + our_host
read_deleted_context = self.context.elevated(read_deleted='yes')
mock_elevated.return_value = read_deleted_context
deleted_instance = fake_instance.fake_instance_obj(
self.context, host=not_our_host, uuid=uuids.deleted_instance)
migration = objects.Migration(instance_uuid=deleted_instance.uuid)
@ -902,7 +899,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
expected_attrs=['info_cache', 'metadata'])
mock_init_virt.assert_called_once_with()
mock_temp_mut.assert_called_once_with(self.context, read_deleted='yes')
mock_get_inst.assert_called_once_with(read_deleted_context)
mock_get_inst.assert_called_once_with(self.context)
mock_get_net.assert_called_once_with(self.context, deleted_instance)
# ensure driver.destroy is called so that driver may

View File

@ -242,6 +242,24 @@ class _TestInstanceObject(object):
self.assertNotIn('system_metadata', instance)
self.assertEqual(0, len(instance.system_metadata))
def test_lazy_load_flavor_on_deleted_instance(self):
# For something like a flavor, we should be reading from the DB
# with read_deleted='yes'
flavor = objects.Flavor(name='testflavor')
instance = objects.Instance(self.context, uuid=uuids.instance,
flavor=flavor,
user_id=self.context.user_id,
project_id=self.context.project_id)
instance.create()
instance.destroy()
# Re-create our local object to make sure it doesn't have sysmeta
# filled in by create()
instance = objects.Instance(self.context, uuid=uuids.instance,
user_id=self.context.user_id,
project_id=self.context.project_id)
self.assertNotIn('flavor', instance)
self.assertEqual('testflavor', instance.flavor.name)
def test_lazy_load_tags(self):
instance = objects.Instance(self.context, uuid=uuids.instance,
user_id=self.context.user_id,