Skip placement on rebuild in the same host

If an instance is rebuilt with a different image on the same host, we
don't need to call placement because there is no change in resource
consumption.
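
For reference, the rebuild case is detected via a '_nova_check_type'
scheduler hint on the RequestSpec. A sketch of the
nova.scheduler.utils.request_is_rebuild() helper that the new code calls,
reconstructed from its call site and the tests below (not quoted from a
hunk of this commit):

    # Sketch, not a verbatim hunk: reconstructed from the call site in
    # the manager and the unit test's scheduler_hints fixture.
    def request_is_rebuild(spec_obj):
        """Return True if the RequestSpec was tagged by the rebuild path."""
        if not spec_obj or 'scheduler_hints' not in spec_obj:
            return False
        check_type = spec_obj.scheduler_hints.get('_nova_check_type')
        return check_type == ['rebuild']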

Conflicts:
        nova/scheduler/manager.py
        nova/tests/fixtures.py
        nova/tests/unit/scheduler/test_scheduler.py

NOTE(lyarwood): The conflicts on Pike are due to the following changes
not being present until Queens: Iae904afb6cb4fcea8bb27741d774ffbe986a5fb4,
I03b95a2106624c2ea24835814ca38e954ec7a997 and
Iacb9808ef7188e3419abfac9e8c5fb5a46c71c05.

Change-Id: Ie252271ecfd38a0a1c61c26e323cc03869889f0a
Closes-Bug: #1750623
(cherry picked from commit c3a894b6af)
(cherry picked from commit 04ea910ad1)
commit ffbf5918c6 (parent d75b995d03)
Author:    Hongbin Lu
Committer: Lee Yarwood
Date:      2018-02-20 22:05:42 +00:00

4 changed files with 34 additions and 1 deletion

--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py

@@ -119,8 +119,9 @@ class SchedulerManager(manager.Manager):
                                                           request_spec,
                                                           filter_properties)
         resources = utils.resources_from_request_spec(spec_obj)
+        is_rebuild = utils.request_is_rebuild(spec_obj)
         alloc_reqs_by_rp_uuid, provider_summaries = None, None
-        if self.driver.USES_ALLOCATION_CANDIDATES:
+        if self.driver.USES_ALLOCATION_CANDIDATES and not is_rebuild:
             res = self.placement_client.get_allocation_candidates(resources)
             if res is None:
                 # We have to handle the case that we failed to connect to the
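
With the guard above, a rebuild request leaves alloc_reqs_by_rp_uuid and
provider_summaries as None, so the driver is called without any placement
data. A minimal illustration, mirroring the unit test at the end of this
commit (objects and uuids are nova test helpers, assumed importable in a
nova test environment):

    from nova import objects
    from nova.tests import uuidsentinel as uuids

    # Tag the spec the way the rebuild path does; request_is_rebuild()
    # then returns True and get_allocation_candidates() is never called.
    fake_spec = objects.RequestSpec(
        scheduler_hints={'_nova_check_type': ['rebuild']})
    fake_spec.instance_uuid = uuids.instance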

--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py

@@ -1446,6 +1446,10 @@ class PlacementApiClient(object):
     def get(self, url):
         return client.APIResponse(self.fixture._fake_get(None, url))
 
+    def put(self, url, body, **kwargs):
+        return client.APIResponse(
+            self.fixture._fake_put(None, url, body, **kwargs))
+
 
 class PlacementFixture(fixtures.Fixture):
     """A fixture to placement operations.

--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py

@@ -1186,6 +1186,15 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
             return self.placement_api.get(
                 '/allocations/%s' % server_uuid).body['allocations']
 
+        def _set_provider_inventory(rp_uuid, resource_class, inventory):
+            # Get the resource provider generation for the inventory update.
+            rp = self.placement_api.get(
+                '/resource_providers/%s' % rp_uuid).body
+            inventory['resource_provider_generation'] = rp['generation']
+            return self.placement_api.put(
+                '/resource_providers/%s/inventories/%s' %
+                (rp_uuid, resource_class), inventory).body
+
         def assertFlavorMatchesAllocation(flavor, allocation):
             self.assertEqual(flavor['vcpus'], allocation['VCPU'])
             self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
@@ -1213,6 +1222,9 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
 
         flavor = self.api.api_get('/flavors/1').body['flavor']
 
+        # make the compute node full and ensure rebuild still succeed
+        _set_provider_inventory(rp_uuid, "VCPU", {"total": 1})
+
         # There should be usage for the server on the compute node now.
         rp_usages = _get_provider_usages(rp_uuid)
         assertFlavorMatchesAllocation(flavor, rp_usages)
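
The test's idea: shrink the provider's VCPU inventory to total=1 so the
host would fail any fresh placement check, then assert that a same-host
rebuild with a new image still succeeds and the original allocation is
unchanged. The rebuild itself is triggered elsewhere in the test with a
server action along these lines (a sketch; post_server_action and the
image ref follow nova's functional test conventions and are not part of
the hunks shown):

    # Sketch of the rebuild action this test exercises.
    rebuild_req_body = {'rebuild': {'imageRef': new_image_ref}}
    self.api.post_server_action(server['id'], rebuild_req_body)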

--- a/nova/tests/unit/scheduler/test_scheduler.py
+++ b/nova/tests/unit/scheduler/test_scheduler.py

@@ -168,6 +168,22 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
         place_res = ([], {})
         self._test_select_destination(place_res)
 
+    @mock.patch('nova.scheduler.utils.resources_from_request_spec')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'get_allocation_candidates')
+    def test_select_destination_is_rebuild(self, mock_get_ac, mock_rfrs):
+        fake_spec = objects.RequestSpec(
+            scheduler_hints={'_nova_check_type': ['rebuild']})
+        fake_spec.instance_uuid = uuids.instance
+        with mock.patch.object(self.manager.driver, 'select_destinations'
+                               ) as select_destinations:
+            self.manager.select_destinations(self.context, spec_obj=fake_spec,
+                instance_uuids=[fake_spec.instance_uuid])
+            select_destinations.assert_called_once_with(
+                self.context, fake_spec,
+                [fake_spec.instance_uuid], None, None)
+            mock_get_ac.assert_not_called()
+
     @mock.patch('nova.scheduler.utils.resources_from_request_spec')
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                 'get_allocation_candidates')