From e221784560c71eeab7c5eeeb42e7c6e910d29340 Mon Sep 17 00:00:00 2001
From: Guang Yee
Date: Thu, 18 May 2017 16:38:16 -0700
Subject: [PATCH] make sure to rebuild claim on recreate

On recreate, where the instance is being evacuated to a different node,
we should rebuild the claim so the migration context is available when
rebuilding the instance.

Change-Id: I53bdcf8edf640e97b4632ef7a093f14a6e3845e4
Closes-Bug: 1658070
(cherry picked from commit a2b0824aca5cb4a2ae579f625327c51ed0414d35)
---
 nova/compute/manager.py                     | 16 +++++++++++++++-
 nova/tests/unit/compute/test_compute.py     |  2 ++
 nova/tests/unit/compute/test_compute_mgr.py |  4 +++-
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index e84c3b833861..1a3c49f01a89 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2714,7 +2714,21 @@ class ComputeManager(manager.Manager):
         context = context.elevated()
 
         LOG.info(_LI("Rebuilding instance"), instance=instance)
-        if scheduled_node is not None:
+
+        # NOTE(gyee): there are three possible scenarios.
+        #
+        # 1. instance is being rebuilt on the same node. In this case,
+        #    recreate should be False and scheduled_node should be None.
+        # 2. instance is being rebuilt on a node chosen by the scheduler
+        #    (i.e. evacuate). In this case, scheduled_node should be
+        #    specified and recreate should be True.
+        # 3. instance is being rebuilt on a node chosen by the user (i.e.
+        #    forced evacuate). In this case, scheduled_node is not
+        #    specified and recreate is set to True.
+        #
+        # For scenarios #2 and #3, we must do a rebuild claim because the
+        # server is being evacuated to a different node.
+        if recreate or scheduled_node is not None:
             rt = self._get_resource_tracker()
             rebuild_claim = rt.rebuild_claim
         else:
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index e886791e6def..7083a0a9db08 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -11936,9 +11936,11 @@ class EvacuateHostTestCase(BaseTestCase):
         patch_spawn = mock.patch.object(self.compute.driver, 'spawn')
         patch_on_disk = mock.patch.object(
             self.compute.driver, 'instance_on_disk', return_value=True)
+        self.compute._resource_tracker.rebuild_claim = mock.MagicMock()
         with patch_spawn, patch_on_disk:
             self._rebuild(migration=migration)
 
+        self.assertTrue(self.compute._resource_tracker.rebuild_claim.called)
         self.assertEqual('done', migration.status)
         migration.save.assert_called_once_with()
 
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 37a0739a7731..7e08d60099fa 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -3035,17 +3035,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                                             node=dead_node)
         instance.migration_context = None
         with test.nested(
+            mock.patch.object(self.compute, '_get_resource_tracker'),
             mock.patch.object(self.compute, '_get_compute_info'),
             mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
             mock.patch.object(objects.Instance, 'save'),
             mock.patch.object(self.compute, '_set_migration_status')
-        ) as (mock_get, mock_rebuild, mock_save, mock_set):
+        ) as (mock_rt, mock_get, mock_rebuild, mock_save, mock_set):
             mock_get.return_value.hypervisor_hostname = 'new-node'
             self.compute.rebuild_instance(self.context, instance, None, None,
                                           None, None, None, None, True)
             mock_get.assert_called_once_with(mock.ANY, self.compute.host)
             self.assertEqual('new-node', instance.node)
             mock_set.assert_called_once_with(None, 'done')
+            mock_rt.assert_called_once_with()
 
     def test_rebuild_default_impl(self):
         def _detach(context, bdms):
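
For readers skimming the diff, the rule the manager.py hunk introduces boils
down to one predicate. The sketch below is not Nova code; needs_rebuild_claim
is a hypothetical helper name that only mirrors the
"recreate or scheduled_node is not None" check added above:

    # Minimal sketch of the claim-selection rule, assuming the three
    # scenarios described in the NOTE(gyee) comment; the helper name
    # needs_rebuild_claim is hypothetical, not part of the patch.
    def needs_rebuild_claim(recreate, scheduled_node):
        """Return True when the rebuild must go through a resource claim."""
        # 1. same-host rebuild:  recreate=False, scheduled_node=None -> False
        # 2. scheduled evacuate: recreate=True,  scheduled_node set  -> True
        # 3. forced evacuate:    recreate=True,  scheduled_node=None -> True
        return recreate or scheduled_node is not None

    # Scenario 3 is the case the patch fixes: the old check looked only at
    # scheduled_node, so a forced evacuate skipped the claim and the
    # migration context was never created.
    assert needs_rebuild_claim(False, None) is False
    assert needs_rebuild_claim(True, 'compute-2') is True
    assert needs_rebuild_claim(True, None) is True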