Merge "Include both VCPU and PCPU in core quota count"
This commit is contained in:
commit
4e5889e0c2
|
@ -2332,6 +2332,15 @@ class SchedulerReportClient(object):
|
|||
:raises: `exception.UsagesRetrievalFailed` if a placement API call
|
||||
fails
|
||||
"""
|
||||
def _get_core_usages(usages):
    """Return the combined VCPU and PCPU usage from a placement response.

    For backward compatibility, the 'cores' quota limit applies to
    flavor.vcpus, which historically covered both shared and dedicated
    CPUs.  Both the orc.VCPU and orc.PCPU resource classes therefore
    count toward the core usage.
    """
    shared = usages['usages'].get(orc.VCPU, 0)
    dedicated = usages['usages'].get(orc.PCPU, 0)
    return shared + dedicated
|
||||
|
||||
total_counts = {'project': {}}
|
||||
# First query counts across all users of a project
|
||||
LOG.debug('Getting usages for project_id %s from placement',
|
||||
|
@ -2341,7 +2350,7 @@ class SchedulerReportClient(object):
|
|||
data = resp.json()
|
||||
# The response from placement will not contain a resource class if
|
||||
# there is no usage. We can consider a missing class to be 0 usage.
|
||||
cores = data['usages'].get(orc.VCPU, 0)
|
||||
cores = _get_core_usages(data)
|
||||
ram = data['usages'].get(orc.MEMORY_MB, 0)
|
||||
total_counts['project'] = {'cores': cores, 'ram': ram}
|
||||
else:
|
||||
|
@ -2353,7 +2362,7 @@ class SchedulerReportClient(object):
|
|||
resp = self._get_usages(context, project_id, user_id=user_id)
|
||||
if resp:
|
||||
data = resp.json()
|
||||
cores = data['usages'].get(orc.VCPU, 0)
|
||||
cores = _get_core_usages(data)
|
||||
ram = data['usages'].get(orc.MEMORY_MB, 0)
|
||||
total_counts['user'] = {'cores': cores, 'ram': ram}
|
||||
else:
|
||||
|
|
|
@ -533,3 +533,17 @@ class TestOpenStackClient(object):
|
|||
|
||||
def get_server_diagnostics(self, server_id):
    """Fetch the diagnostics document for the given server."""
    url = '/servers/%s/diagnostics' % server_id
    return self.api_get(url).body
|
||||
|
||||
def get_quota_detail(self, project_id=None):
    """Return the detailed quota set for a project.

    Defaults to this client's own project when no project_id is given.
    """
    target = project_id if project_id else self.project_id
    resp = self.api_get('/os-quota-sets/%s/detail' % target)
    return resp.body['quota_set']
|
||||
|
||||
def update_quota(self, quotas, project_id=None):
    """PUT new quota values for a project and return the updated set.

    Defaults to this client's own project when no project_id is given.
    """
    target = project_id if project_id else self.project_id
    body = {'quota_set': dict(quotas)}
    resp = self.api_put('/os-quota-sets/%s' % target, body)
    return resp.body['quota_set']
|
||||
|
|
|
@ -86,6 +86,14 @@ class NUMAServersTest(NUMAServersTestBase):
|
|||
server_ids = [s['id'] for s in servers]
|
||||
self.assertIn(created_server_id, server_ids)
|
||||
|
||||
# Validate the quota usage
|
||||
if end_status == 'ACTIVE':
|
||||
quota_details = self.api.get_quota_detail()
|
||||
expected_core_usages = expected_usage.get(
|
||||
'VCPU', expected_usage.get('PCPU', 0))
|
||||
self.assertEqual(expected_core_usages,
|
||||
quota_details['cores']['in_use'])
|
||||
|
||||
# Validate that NUMATopologyFilter has been called
|
||||
self.assertTrue(self.mock_filter.called)
|
||||
|
||||
|
@ -170,6 +178,41 @@ class NUMAServersTest(NUMAServersTestBase):
|
|||
self.assertEqual(1, len(inst.numa_topology.cells))
|
||||
self.assertEqual(5, inst.numa_topology.cells[0].cpu_topology.cores)
|
||||
|
||||
def test_create_server_with_pinning_quota_fails(self):
    """Try to boot a pinned instance with insufficient core quota.

    The create request must be rejected because the flavor asks for
    more dedicated CPUs than the project's quota allows.
    """
    host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                     cpu_cores=2, cpu_threads=2,
                                     kB_mem=15740000)
    self.mock_conn.return_value = self._get_connection(host_info=host_info)

    extra_spec = {
        'hw:cpu_policy': 'dedicated',
        'hw:cpu_thread_policy': 'prefer',
    }
    flavor_id = self._create_flavor(vcpu=2, extra_spec=extra_spec)

    # Drop the core quota below what the flavor requires.
    self.api.update_quota({'cores': 1})

    # NOTE(bhagyashris): the host name 'compute1' is hard coded in the
    # Connection class in fakelibvirt.py, so always use it here to make
    # it possible to look up resource provider information when
    # verifying compute usages.
    # TODO(stephenfin): Remove the hardcoded limit, possibly overridding
    # 'start_service' to make sure there isn't a mismatch
    self.compute = self.start_service('compute', host='compute1')

    post = {'server': self._build_server(flavor_id)}

    # Quota enforcement should reject the boot with HTTP 403.
    ex = self.assertRaises(client.OpenStackApiException,
                           self.api.post_server, post)
    self.assertEqual(403, ex.response.status_code)
|
||||
|
||||
def test_resize_unpinned_to_pinned(self):
|
||||
"""Create an unpinned instance and resize it to a flavor with pinning.
|
||||
|
||||
|
@ -304,6 +347,13 @@ class NUMAServersTest(NUMAServersTestBase):
|
|||
self.assertEqual(expected_usage, compute_usage)
|
||||
|
||||
|
||||
class NUMAServerTestWithCountingQuotaFromPlacement(NUMAServersTest):
    """Re-run the NUMA server tests with quota usage counted from placement.

    Only the quota-counting configuration differs; all test methods are
    inherited from NUMAServersTest.
    """

    def setUp(self):
        self.flags(count_usage_from_placement=True, group='quota')
        # Anchor super() at this class, not at NUMAServersTest: the
        # original super(NUMAServersTest, self) started the MRO lookup
        # *after* the parent, silently skipping any
        # NUMAServersTest.setUp in the chain.
        super(NUMAServerTestWithCountingQuotaFromPlacement, self).setUp()
|
||||
|
||||
|
||||
class NUMAServersWithNetworksTest(NUMAServersTestBase):
|
||||
|
||||
def setUp(self):
|
||||
|
|
|
@ -4135,3 +4135,16 @@ class TestUsages(SchedulerReportClientTestCase):
|
|||
expected = {'project': {'cores': 0, 'ram': 0},
|
||||
'user': {'cores': 0, 'ram': 0}}
|
||||
self.assertDictEqual(expected, counts)
|
||||
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
def test_get_usages_count_with_pcpu(self, mock_get):
    """VCPU and PCPU usages are both summed into the 'cores' count."""
    usage_body = {'usages': {orc.VCPU: 2, orc.PCPU: 2}}
    mock_get.return_value = fake_requests.FakeResponse(
        200, content=jsonutils.dumps(usage_body))

    counts = self.client.get_usages_counts_for_quota(
        self.context, 'fake-project', user_id='fake-user')

    # One placement call for the project scope, one for the user scope.
    self.assertEqual(2, mock_get.call_count)
    expected = {'project': {'cores': 4, 'ram': 0},
                'user': {'cores': 4, 'ram': 0}}
    self.assertDictEqual(expected, counts)
|
||||
|
|
Loading…
Reference in New Issue