Update quota sets APIs
Ensure the limit-related APIs reflect the new reality of enforcing the API and DB limits based on Keystone only. For now we skip all updates to the DB, as none of them mean anything to the new code, since we now only look at Keystone. Note: this will need to be updated again once we add limits for cores, RAM, instances, etc. blueprint unified-limits-nova Change-Id: I5ef968395b4bdc6f190e239a19a723316b1d5baf
This commit is contained in:
parent
ce4f796ec1
commit
c384824683
|
@ -29,6 +29,7 @@ from nova.api import validation
|
|||
import nova.conf
|
||||
from nova import exception
|
||||
from nova.i18n import _
|
||||
from nova.limit import utils as limit_utils
|
||||
from nova import objects
|
||||
from nova.policies import quota_sets as qs_policies
|
||||
from nova import quota
|
||||
|
@ -205,10 +206,16 @@ class QuotaSetsController(wsgi.Controller):
|
|||
settable_quotas = QUOTAS.get_settable_quotas(context, project_id,
|
||||
user_id=user_id)
|
||||
|
||||
requested_quotas = body['quota_set'].items()
|
||||
if limit_utils.use_unified_limits():
|
||||
# NOTE(johngarbutt) currently all info comes from keystone
|
||||
# we don't update the database.
|
||||
requested_quotas = []
|
||||
|
||||
# NOTE(dims): Pass #1 - In this loop for quota_set.items(), we validate
|
||||
# min/max values and bail out if any of the items in the set is bad.
|
||||
valid_quotas = {}
|
||||
for key, value in body['quota_set'].items():
|
||||
for key, value in requested_quotas:
|
||||
if key == 'force' or (not value and value != 0):
|
||||
continue
|
||||
# validate whether already used and reserved exceeds the new
|
||||
|
@ -276,8 +283,12 @@ class QuotaSetsController(wsgi.Controller):
|
|||
context.can(qs_policies.POLICY_ROOT % 'delete', {'project_id': id})
|
||||
params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
|
||||
user_id = params.get('user_id', [None])[0]
|
||||
if user_id:
|
||||
objects.Quotas.destroy_all_by_project_and_user(
|
||||
context, id, user_id)
|
||||
else:
|
||||
objects.Quotas.destroy_all_by_project(context, id)
|
||||
|
||||
# NOTE(johngarbutt) with unified limits we only use keystone, not the
|
||||
# db
|
||||
if not limit_utils.use_unified_limits():
|
||||
if user_id:
|
||||
objects.Quotas.destroy_all_by_project_and_user(
|
||||
context, id, user_id)
|
||||
else:
|
||||
objects.Quotas.destroy_all_by_project(context, id)
|
||||
|
|
|
@ -801,7 +801,10 @@ class UnifiedLimitsDriver(NoopQuotaDriver):
|
|||
:param resources: A dictionary of the registered resources.
|
||||
:param quota_class: Placeholder, we always assume default quota class.
|
||||
"""
|
||||
# NOTE(johngarbutt): ignoring quota_class, as ignored in noop driver
|
||||
return self.get_defaults(context, resources)
|
||||
|
||||
def get_defaults(self, context, resources):
|
||||
local_limits = local_limit.get_legacy_default_limits()
|
||||
# TODO(melwitt): This is temporary when we are in a state where cores,
|
||||
# ram, and instances quota limits are not known/enforced with unified
|
||||
|
|
|
@ -1012,9 +1012,7 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
|
|||
}
|
||||
}
|
||||
self.assertEqual(expected_response, response)
|
||||
mock_create.assert_called_once_with(req.environ['nova.context'],
|
||||
uuids.project_id, "server_groups",
|
||||
2, user_id=None)
|
||||
self.assertEqual(0, mock_create.call_count)
|
||||
|
||||
@mock.patch.object(objects.Quotas, "create_limit")
|
||||
def test_update_v21_user(self, mock_create):
|
||||
|
@ -1040,6 +1038,75 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
|
|||
}
|
||||
}
|
||||
self.assertEqual(expected_response, response)
|
||||
mock_create.assert_called_once_with(req.environ['nova.context'],
|
||||
uuids.project_id, "key_pairs", 52,
|
||||
user_id="42")
|
||||
self.assertEqual(0, mock_create.call_count)
|
||||
|
||||
def test_defaults_v21(self):
|
||||
req = fakes.HTTPRequest.blank("")
|
||||
response = self.controller.defaults(req, uuids.project_id)
|
||||
expected_response = {
|
||||
'quota_set': {
|
||||
'id': uuids.project_id,
|
||||
'cores': -1,
|
||||
'fixed_ips': -1,
|
||||
'floating_ips': -1,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'injected_file_path_bytes': 255,
|
||||
'injected_files': 5,
|
||||
'instances': -1,
|
||||
'key_pairs': 100,
|
||||
'metadata_items': 128,
|
||||
'ram': -1,
|
||||
'security_group_rules': -1,
|
||||
'security_groups': -1,
|
||||
'server_group_members': 10,
|
||||
'server_groups': 12,
|
||||
}
|
||||
}
|
||||
self.assertEqual(expected_response, response)
|
||||
|
||||
def test_defaults_v21_different_limit_values(self):
|
||||
reglimits = {local_limit.SERVER_METADATA_ITEMS: 7,
|
||||
local_limit.INJECTED_FILES: 6,
|
||||
local_limit.INJECTED_FILES_CONTENT: 4,
|
||||
local_limit.INJECTED_FILES_PATH: 5,
|
||||
local_limit.KEY_PAIRS: 1,
|
||||
local_limit.SERVER_GROUPS: 3,
|
||||
local_limit.SERVER_GROUP_MEMBERS: 2}
|
||||
self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
|
||||
|
||||
req = fakes.HTTPRequest.blank("")
|
||||
response = self.controller.defaults(req, uuids.project_id)
|
||||
expected_response = {
|
||||
'quota_set': {
|
||||
'id': uuids.project_id,
|
||||
'cores': -1,
|
||||
'fixed_ips': -1,
|
||||
'floating_ips': -1,
|
||||
'injected_file_content_bytes': 4,
|
||||
'injected_file_path_bytes': 5,
|
||||
'injected_files': 6,
|
||||
'instances': -1,
|
||||
'key_pairs': 1,
|
||||
'metadata_items': 7,
|
||||
'ram': -1,
|
||||
'security_group_rules': -1,
|
||||
'security_groups': -1,
|
||||
'server_group_members': 2,
|
||||
'server_groups': 3,
|
||||
}
|
||||
}
|
||||
self.assertEqual(expected_response, response)
|
||||
|
||||
@mock.patch('nova.objects.Quotas.destroy_all_by_project')
|
||||
def test_quotas_delete(self, mock_destroy_all_by_project):
|
||||
req = fakes.HTTPRequest.blank("")
|
||||
self.controller.delete(req, "1234")
|
||||
# Ensure destroy isn't called for unified limits
|
||||
self.assertEqual(0, mock_destroy_all_by_project.call_count)
|
||||
|
||||
@mock.patch('nova.objects.Quotas.destroy_all_by_project_and_user')
|
||||
def test_user_quotas_delete(self, mock_destroy_all_by_user):
|
||||
req = fakes.HTTPRequest.blank("?user_id=42")
|
||||
self.controller.delete(req, "1234")
|
||||
# Ensure destroy isn't called for unified limits
|
||||
self.assertEqual(0, mock_destroy_all_by_user.call_count)
|
||||
|
|
|
@ -1966,6 +1966,23 @@ class UnifiedLimitsDriverTestCase(NoopQuotaDriverTestCase):
|
|||
local_limit.SERVER_GROUPS: 12,
|
||||
local_limit.SERVER_GROUP_MEMBERS: 10}
|
||||
self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
|
||||
|
||||
self.expected_without_dict = {
|
||||
'cores': -1,
|
||||
'fixed_ips': -1,
|
||||
'floating_ips': -1,
|
||||
'injected_file_content_bytes': 10240,
|
||||
'injected_file_path_bytes': 255,
|
||||
'injected_files': 5,
|
||||
'instances': -1,
|
||||
'key_pairs': 100,
|
||||
'metadata_items': 128,
|
||||
'ram': -1,
|
||||
'security_group_rules': -1,
|
||||
'security_groups': -1,
|
||||
'server_group_members': 10,
|
||||
'server_groups': 12,
|
||||
}
|
||||
self.expected_without_usages = {
|
||||
'cores': {'limit': -1},
|
||||
'fixed_ips': {'limit': -1},
|
||||
|
|
Loading…
Reference in New Issue