Disable limits if force_hosts or force_nodes is set
Setting max_placement_results will make force_host invalid sometimes,
especially in large-scale environments.
Disable limit param in GET /allocation_candidates if force_hosts
or force_nodes is set.
NOTE(xulei): There are differences from the original change because
I496e8d64907fdcb0e2da255725aed1fc529725f2 was not in stable/queens,
so we transplant code to get_allocation_candidates in this backport.
Change-Id: Iff1b49fe7e6347e3c2bb5992494b2450809719a2
Closes-Bug: #1777591
(cherry picked from commit 1d91811ad4)
This commit is contained in:
parent
d7864fbb9c
commit
ab1fd87ed9
|
@ -344,8 +344,9 @@ class SchedulerReportClient(object):
|
|||
for (rc, amount) in res.items()))
|
||||
qs_params = {
|
||||
'resources': resource_query,
|
||||
'limit': CONF.scheduler.max_placement_results,
|
||||
}
|
||||
if resources._limit is not None:
|
||||
qs_params['limit'] = resources._limit
|
||||
if required_traits:
|
||||
qs_params['required'] = ",".join(required_traits)
|
||||
|
||||
|
|
|
@ -56,6 +56,9 @@ class ResourceRequest(object):
|
|||
def __init__(self):
|
||||
# { ident: RequestGroup }
|
||||
self._rg_by_id = {}
|
||||
# Default to the configured limit but _limit can be
|
||||
# set to None to indicate "no limit".
|
||||
self._limit = CONF.scheduler.max_placement_results
|
||||
|
||||
def get_request_group(self, ident):
|
||||
if ident not in self._rg_by_id:
|
||||
|
@ -333,6 +336,12 @@ def resources_from_request_spec(spec_obj):
|
|||
for rclass, amount in spec_resources.items():
|
||||
res_req.get_request_group(None).resources[rclass] = amount
|
||||
|
||||
# Don't limit allocation candidates when using force_hosts or force_nodes.
|
||||
if 'force_hosts' in spec_obj and spec_obj.force_hosts:
|
||||
res_req._limit = None
|
||||
if 'force_nodes' in spec_obj and spec_obj.force_nodes:
|
||||
res_req._limit = None
|
||||
|
||||
return res_req
|
||||
|
||||
|
||||
|
|
|
@ -1632,6 +1632,36 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
|||
self.assertEqual(expected_query, query)
|
||||
self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
|
||||
|
||||
def test_get_allocation_candidates_with_no_limit(self):
|
||||
resp_mock = mock.Mock(status_code=200)
|
||||
json_data = {
|
||||
'allocation_requests': mock.sentinel.alloc_reqs,
|
||||
'provider_summaries': mock.sentinel.p_sums,
|
||||
}
|
||||
resources = scheduler_utils.ResourceRequest.from_extra_specs({
|
||||
'resources:VCPU': '1',
|
||||
'resources:MEMORY_MB': '1024',
|
||||
})
|
||||
resources._limit = None
|
||||
expected_path = '/allocation_candidates'
|
||||
expected_query = {'resources': ['MEMORY_MB:1024,VCPU:1']}
|
||||
|
||||
resp_mock.json.return_value = json_data
|
||||
self.ks_adap_mock.get.return_value = resp_mock
|
||||
|
||||
alloc_reqs, p_sums, allocation_request_version = \
|
||||
self.client.get_allocation_candidates(self.context, resources)
|
||||
|
||||
self.ks_adap_mock.get.assert_called_once_with(
|
||||
mock.ANY, raise_exc=False, microversion='1.17',
|
||||
headers={'X-Openstack-Request-Id': self.context.global_id})
|
||||
url = self.ks_adap_mock.get.call_args[0][0]
|
||||
split_url = parse.urlsplit(url)
|
||||
query = parse.parse_qs(split_url.query)
|
||||
self.assertEqual(expected_path, split_url.path)
|
||||
self.assertEqual(expected_query, query)
|
||||
self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
|
||||
|
||||
def test_get_allocation_candidates_not_found(self):
|
||||
# Ensure _get_resource_provider() just returns None when the placement
|
||||
# API doesn't find a resource provider matching a UUID
|
||||
|
|
|
@ -264,6 +264,41 @@ class TestUtils(test.NoDBTestCase):
|
|||
fake_spec = objects.RequestSpec(flavor=flavor)
|
||||
utils.resources_from_request_spec(fake_spec)
|
||||
|
||||
def test_process_no_force_hosts_or_force_nodes(self):
|
||||
flavor = objects.Flavor(vcpus=1,
|
||||
memory_mb=1024,
|
||||
root_gb=15,
|
||||
ephemeral_gb=0,
|
||||
swap=0)
|
||||
fake_spec = objects.RequestSpec(flavor=flavor)
|
||||
expected = utils.ResourceRequest()
|
||||
resources = utils.resources_from_request_spec(fake_spec)
|
||||
self.assertEqual(expected._limit, resources._limit)
|
||||
|
||||
def test_process_use_force_nodes(self):
|
||||
flavor = objects.Flavor(vcpus=1,
|
||||
memory_mb=1024,
|
||||
root_gb=15,
|
||||
ephemeral_gb=0,
|
||||
swap=0)
|
||||
fake_spec = objects.RequestSpec(flavor=flavor, force_nodes=['test'])
|
||||
expected = utils.ResourceRequest()
|
||||
expected._limit = None
|
||||
resources = utils.resources_from_request_spec(fake_spec)
|
||||
self.assertEqual(expected._limit, resources._limit)
|
||||
|
||||
def test_process_use_force_hosts(self):
|
||||
flavor = objects.Flavor(vcpus=1,
|
||||
memory_mb=1024,
|
||||
root_gb=15,
|
||||
ephemeral_gb=0,
|
||||
swap=0)
|
||||
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
|
||||
expected = utils.ResourceRequest()
|
||||
expected._limit = None
|
||||
resources = utils.resources_from_request_spec(fake_spec)
|
||||
self.assertEqual(expected._limit, resources._limit)
|
||||
|
||||
@mock.patch('nova.compute.utils.is_volume_backed_instance',
|
||||
return_value=False)
|
||||
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
|
||||
|
|
Loading…
Reference in New Issue