make sure to rebuild claim on recreate
On recreate, where the instance is being evacuated to a different node, we
should be rebuilding the claim so the migration context is available when
rebuilding the instance.

Change-Id: I53bdcf8edf640e97b4632ef7a093f14a6e3845e4
Closes-Bug: 1658070
parent 39b67842d0
commit a2b0824aca
nova
@@ -2764,7 +2764,21 @@ class ComputeManager(manager.Manager):
         context = context.elevated()

         LOG.info(_LI("Rebuilding instance"), instance=instance)

-        if scheduled_node is not None:
+        # NOTE(gyee): there are three possible scenarios.
+        #
+        # 1. instance is being rebuilt on the same node. In this case,
+        #    recreate should be False and scheduled_node should be None.
+        # 2. instance is being rebuilt on a node chosen by the
+        #    scheduler (i.e. evacuate). In this case, scheduled_node should
+        #    be specified and recreate should be True.
+        # 3. instance is being rebuilt on a node chosen by the user. (i.e.
+        #    force evacuate). In this case, scheduled_node is not specified
+        #    and recreate is set to True.
+        #
+        # For scenarios #2 and #3, we must do rebuild claim as server is
+        # being evacuated to a different node.
+        if recreate or scheduled_node is not None:
             rt = self._get_resource_tracker()
             rebuild_claim = rt.rebuild_claim
         else:
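The hunk only shows where the claim callable is picked; the fallback branch and the point where the claim is actually entered sit outside the diff context. Below is a self-contained sketch of the select-then-claim pattern this condition enables; every name in it (rebuild_claim, nop_claim, rebuild) is invented for illustration and is not the real Nova code or its signatures.

    # Sketch only: pick a real claim when evacuating, a no-op claim for a
    # same-host rebuild, then run the rebuild under the chosen claim.
    import contextlib

    @contextlib.contextmanager
    def rebuild_claim(instance, node):
        # A real claim would reserve resources on the destination node and
        # attach a migration context to the instance; raising here would
        # abort the evacuation before the guest is touched.
        print('claiming resources for %s on %s' % (instance, node))
        yield
        print('claim released for %s' % instance)

    @contextlib.contextmanager
    def nop_claim(instance, node):
        # Same-host rebuild: nothing to reserve.
        yield

    def rebuild(instance, recreate=False, scheduled_node=None):
        claim = rebuild_claim if (recreate or scheduled_node is not None) else nop_claim
        with claim(instance, scheduled_node):
            print('rebuilding %s' % instance)

    rebuild('inst-1')                                          # scenario 1: no claim
    rebuild('inst-2', recreate=True, scheduled_node='node-b')  # scenario 2: claim taken
    rebuild('inst-3', recreate=True)                           # scenario 3: claim taken

Running the three calls mirrors the three scenarios in the comment above: only the evacuations take a claim, which is exactly what the widened condition guarantees.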
@@ -12196,9 +12196,11 @@ class EvacuateHostTestCase(BaseTestCase):
         patch_spawn = mock.patch.object(self.compute.driver, 'spawn')
         patch_on_disk = mock.patch.object(
             self.compute.driver, 'instance_on_disk', return_value=True)
+        self.compute._resource_tracker.rebuild_claim = mock.MagicMock()
         with patch_spawn, patch_on_disk:
             self._rebuild(migration=migration)

+        self.assertTrue(self.compute._resource_tracker.rebuild_claim.called)
         self.assertEqual('done', migration.status)
         migration.save.assert_called_once_with()
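The test swaps the tracker's rebuild_claim for a mock.MagicMock and then only asserts that it was called. A minimal stand-alone sketch of that pattern, with the Tracker and Service classes invented for illustration:

    from unittest import mock

    class Tracker(object):
        def rebuild_claim(self, instance):
            raise NotImplementedError('real claim logic lives elsewhere')

    class Service(object):
        def __init__(self):
            self._resource_tracker = Tracker()

        def evacuate(self, instance):
            # Code under test: expected to take a rebuild claim.
            self._resource_tracker.rebuild_claim(instance)

    service = Service()
    service._resource_tracker.rebuild_claim = mock.MagicMock()
    service.evacuate('fake-instance')

    assert service._resource_tracker.rebuild_claim.called
    service._resource_tracker.rebuild_claim.assert_called_once_with('fake-instance')

assert_called_once_with additionally pins the call arguments; the Nova test above only needs .called because it just checks that a claim was attempted at all during the evacuation.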
@@ -3138,17 +3138,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                                                    node=dead_node)
         instance.migration_context = None
         with test.nested(
+            mock.patch.object(self.compute, '_get_resource_tracker'),
             mock.patch.object(self.compute, '_get_compute_info'),
             mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
             mock.patch.object(objects.Instance, 'save'),
             mock.patch.object(self.compute, '_set_migration_status')
-        ) as (mock_get, mock_rebuild, mock_save, mock_set):
+        ) as (mock_rt, mock_get, mock_rebuild, mock_save, mock_set):
             mock_get.return_value.hypervisor_hostname = 'new-node'
             self.compute.rebuild_instance(self.context, instance, None, None,
                                           None, None, None, None, True)
             mock_get.assert_called_once_with(mock.ANY, self.compute.host)
             self.assertEqual('new-node', instance.node)
             mock_set.assert_called_once_with(None, 'done')
+            mock_rt.assert_called_once_with()

     def test_rebuild_default_impl(self):
         def _detach(context, bdms):
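The fix patches one more method, _get_resource_tracker, so the as-tuple gains a matching first name: nova's test.nested helper yields one mock per patch, in the order the patches are listed. A small stand-alone sketch of that ordering rule using plain mock.patch.object (the Compute class and its methods below are invented for illustration):

    from unittest import mock

    class Compute(object):
        def _get_resource_tracker(self):
            return 'real tracker'

        def _get_compute_info(self, host):
            return 'real info'

    compute = Compute()

    # Each patch.object context manager yields exactly one MagicMock, in the
    # order the patches appear, so the names on the left must line up with
    # the patches on the right.
    with mock.patch.object(compute, '_get_resource_tracker') as mock_rt, \
         mock.patch.object(compute, '_get_compute_info') as mock_get:
        compute._get_resource_tracker()
        compute._get_compute_info('some-host')

    mock_rt.assert_called_once_with()
    mock_get.assert_called_once_with('some-host')

Adding a fifth mock.patch.object to the test.nested call therefore also requires extending the unpacking tuple, which is what the change from the four-name to the five-name tuple above does.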