Enforce API and DB limits

When using unified limits, we add enforcement of those limits on all
related API calls. Note: we do not yet correctly report the configured
limits to users via the quota APIs; that will be addressed in a future
patch.

Note that the unified limits calls are made alongside the existing
legacy quota calls. The old quota calls will be handled by the quota
engine driver, which is basically a no-op. This is to make it easier to
remove the legacy code paths in the future.
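
As a rough sketch of that pattern (the helper name here is
hypothetical; the real call sites appear in the diff below), every
check site pairs the two calls:

    from nova.limit import local as local_limit
    from nova import objects

    def _check_server_group_quota(context, project_id):
        # Legacy check: effectively a no-op once the unified limits
        # quota driver is configured, kept so the old code path can
        # be deleted cleanly later.
        objects.Quotas.check_deltas(
            context, {'server_groups': 1}, project_id, context.user_id)
        # New check: enforced via oslo.limit against registered limits.
        local_limit.enforce_db_limit(
            context, local_limit.SERVER_GROUPS,
            entity_scope=project_id, delta=1)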

Note that over-quota exceptions raised with unified limits use the
standard (improved) exception messages produced by oslo.limit. However,
they reuse the existing exception classes to ease integration, so API
users see the same return codes no matter which code path enforces the
limits.
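
For example, in the server group controller below both paths surface as
the same HTTP 403 (a sketch reusing the hypothetical helper above):

    from webob import exc

    import nova.exception

    try:
        _check_server_group_quota(context, project_id)
    except nova.exception.ServerGroupLimitExceeded as e:
        # Unified limits path: carries the oslo.limit message.
        raise exc.HTTPForbidden(explanation=str(e))
    except nova.exception.OverQuota:
        # Legacy path: same 403 status, legacy message.
        raise exc.HTTPForbidden(
            explanation='Quota exceeded, too many server groups.')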

Finally, this also adds test coverage where it was missing. Coverage
for "quota recheck" behavior in KeypairAPI is added where all other
KeypairAPI testing is located. Duplicate coverage is removed from
nova/tests/unit/api/openstack/compute/test_keypairs.py at the same
time.

blueprint unified-limits-nova

Change-Id: I36e82a17579158063396d7e55b495ccff4959ceb
John Garbutt 2020-03-10 10:25:42 +00:00 committed by melanie witt
parent 3b69f959a8
commit 4207493829
8 changed files with 432 additions and 47 deletions

View File

@@ -30,6 +30,7 @@ import nova.conf
from nova import context as nova_context
import nova.exception
from nova.i18n import _
from nova.limit import local as local_limit
from nova import objects
from nova.objects import service
from nova.policies import server_groups as sg_policies
@@ -191,6 +192,10 @@ class ServerGroupController(wsgi.Controller):
try:
objects.Quotas.check_deltas(context, {'server_groups': 1},
project_id, context.user_id)
local_limit.enforce_db_limit(context, local_limit.SERVER_GROUPS,
entity_scope=project_id, delta=1)
except nova.exception.ServerGroupLimitExceeded as e:
raise exc.HTTPForbidden(explanation=str(e))
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise exc.HTTPForbidden(explanation=msg)
@@ -231,6 +236,16 @@ class ServerGroupController(wsgi.Controller):
objects.Quotas.check_deltas(context, {'server_groups': 0},
project_id,
context.user_id)
# TODO(johngarbutt): decide if we need this recheck
# The quota rechecking of limits is really just to protect
# against denial of service attacks that aim to fill up the
# database. Its usefulness could be debated.
local_limit.enforce_db_limit(context,
local_limit.SERVER_GROUPS,
project_id, delta=0)
except nova.exception.ServerGroupLimitExceeded as e:
sg.destroy()
raise exc.HTTPForbidden(explanation=str(e))
except nova.exception.OverQuota:
sg.destroy()
msg = _("Quota exceeded, too many server groups.")
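
The delta=0 recheck above (repeated for server group members and key
pairs later in this patch) follows one pattern: create the row first,
then re-enforce with a zero delta so that the usage count, which now
includes the new row, is compared against the limit, and destroy the
row if a parallel request won the race. A minimal sketch, assuming sg
is the server group object just created:

    from oslo_utils import excutils

    import nova.exception
    from nova.limit import local as local_limit

    def _recheck_server_group_quota(context, project_id, sg):
        try:
            local_limit.enforce_db_limit(
                context, local_limit.SERVER_GROUPS,
                entity_scope=project_id, delta=0)
        except nova.exception.ServerGroupLimitExceeded:
            # We lost the race: roll back the row we just created.
            with excutils.save_and_reraise_exception():
                sg.destroy()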

View File

@@ -59,6 +59,7 @@ from nova import exception
from nova import exception_wrapper
from nova.i18n import _
from nova.image import glance
from nova.limit import local as local_limit
from nova.network import constants
from nova.network import model as network_model
from nova.network import neutron
@@ -409,6 +410,10 @@ class API:
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
local_limit.enforce_api_limit(local_limit.INJECTED_FILES,
len(injected_files))
except exception.OnsetFileLimitExceeded:
raise
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
@@ -424,6 +429,16 @@ class API:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
# TODO(johngarbutt) we can simplify the except clause when
# the above legacy quota check is removed.
local_limit.enforce_api_limit(
local_limit.INJECTED_FILES_PATH, max_path)
local_limit.enforce_api_limit(
local_limit.INJECTED_FILES_CONTENT, max_content)
except exception.OnsetFilePathLimitExceeded:
raise
except exception.OnsetFileContentLimitExceeded:
raise
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
@@ -444,6 +459,10 @@ class API:
num_metadata = len(metadata)
try:
objects.Quotas.limit_check(context, metadata_items=num_metadata)
local_limit.enforce_api_limit(
local_limit.SERVER_METADATA_ITEMS, num_metadata)
except exception.MetadataLimitExceeded:
raise
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
@@ -1451,6 +1470,11 @@ class API:
objects.Quotas.check_deltas(
context, {'server_group_members': 1},
instance_group, context.user_id)
local_limit.enforce_db_limit(
context, local_limit.SERVER_GROUP_MEMBERS,
entity_scope=instance_group.uuid, delta=1)
except exception.GroupMemberLimitExceeded:
raise
except exception.OverQuota:
msg = _("Quota exceeded, too many servers in "
"group")
@@ -1469,6 +1493,19 @@ class API:
objects.Quotas.check_deltas(
context, {'server_group_members': 0},
instance_group, context.user_id)
# TODO(johngarbutt): decide if we need this check
# The quota rechecking of limits is really just to
# protect against denial of service attacks that
# aim to fill up the database. Its usefulness could
# be debated.
local_limit.enforce_db_limit(
context, local_limit.SERVER_GROUP_MEMBERS,
entity_scope=instance_group.uuid, delta=0)
except exception.GroupMemberLimitExceeded:
with excutils.save_and_reraise_exception():
objects.InstanceGroup._remove_members_in_db(
context, instance_group.id,
[instance.uuid])
except exception.OverQuota:
objects.InstanceGroup._remove_members_in_db(
context, instance_group.id, [instance.uuid])
@@ -6551,6 +6588,10 @@ class KeypairAPI:
'1 and 255 characters long'))
try:
objects.Quotas.check_deltas(context, {'key_pairs': 1}, user_id)
local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS,
entity_scope=user_id, delta=1)
except exception.KeypairLimitExceeded:
raise
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@@ -6623,6 +6664,15 @@ class KeypairAPI:
if CONF.quota.recheck_quota:
try:
objects.Quotas.check_deltas(context, {'key_pairs': 0}, user_id)
# TODO(johngarbutt) do we really need this recheck?
# The quota rechecking of limits is really just to protect
# against denial of service attacks that aim to fill up the
# database. Its usefulness could be debated.
local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS,
entity_scope=user_id, delta=0)
except exception.KeypairLimitExceeded:
with excutils.save_and_reraise_exception():
keypair.destroy()
except exception.OverQuota:
keypair.destroy()
raise exception.KeypairLimitExceeded()

View File

@@ -45,6 +45,11 @@ API_LIMITS = set([
KEY_PAIRS = "server_key_pairs"
SERVER_GROUPS = "server_groups"
SERVER_GROUP_MEMBERS = "server_group_members"
DB_LIMITS = set([
KEY_PAIRS,
SERVER_GROUPS,
SERVER_GROUP_MEMBERS,
])
# Checks only happen when we are using the unified limits driver
UNIFIED_LIMITS_DRIVER = "nova.quota.UnifiedLimitsDriver"
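Presumably the enforce helpers in this module gate on that driver
setting before doing any work; a minimal sketch of such a guard (the
actual guard lives in the unchanged part of this file):

    import nova.conf

    CONF = nova.conf.CONF

    def _unified_limits_enabled() -> bool:
        # Hypothetical helper: limit checks only run when the unified
        # limits quota driver is configured.
        return CONF.quota.driver == UNIFIED_LIMITS_DRIVER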
@@ -62,6 +67,42 @@ EXCEPTIONS = {
SERVER_GROUP_MEMBERS: exception.GroupMemberLimitExceeded,
}
# Map new limit-based quota names to the legacy ones.
LEGACY_LIMITS = {
SERVER_METADATA_ITEMS: "metadata_items",
INJECTED_FILES: "injected_files",
INJECTED_FILES_CONTENT: "injected_file_content_bytes",
INJECTED_FILES_PATH: "injected_file_path_bytes",
KEY_PAIRS: "key_pairs",
SERVER_GROUPS: SERVER_GROUPS,
SERVER_GROUP_MEMBERS: SERVER_GROUP_MEMBERS,
}
def get_in_use(context, project_id):
"""Returns in use counts for each resource, for given project.
This sounds simple but many resources can't be counted per project,
so the only sensible value is 0. For example, key pairs are counted
per user, and server group members are counted per server group,
and metadata items are counted per server.
This behaviour is consistent with what is returned today by the
DB based quota driver.
"""
count = _server_group_count(context, project_id)['server_groups']
usages = {
# DB limits
SERVER_GROUPS: count,
SERVER_GROUP_MEMBERS: 0,
KEY_PAIRS: 0,
# API limits
SERVER_METADATA_ITEMS: 0,
INJECTED_FILES: 0,
INJECTED_FILES_CONTENT: 0,
INJECTED_FILES_PATH: 0,
}
return _convert_keys_to_legacy_name(usages)
def always_zero_usage(
project_id: str, resource_names: ty.List[str]
@@ -150,6 +191,21 @@ def enforce_db_limit(
raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e))
def _convert_keys_to_legacy_name(new_dict):
legacy = {}
for new_name, old_name in LEGACY_LIMITS.items():
# defensive in case oslo or keystone doesn't give us an answer
legacy[old_name] = new_dict.get(new_name) or 0
return legacy
def get_legacy_default_limits():
# TODO(johngarbutt): need oslo.limit API for this, it should do caching
enforcer = limit.Enforcer(lambda: None)
new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
return _convert_keys_to_legacy_name(dict(new_limits))
def _keypair_count(context, user_id, *args):
count = objects.KeyPairList.get_count_by_user(context, user_id)
return {'server_key_pairs': count}
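
Roughly, reading the registered defaults back under their legacy names
works as in this sketch (mirroring test_get_legacy_default_limits
below; the test class name here is made up for illustration):

    from oslo_limit import fixture as limit_fixture

    from nova.limit import local as local_limit
    from nova import test

    class LegacyDefaultsExample(test.NoDBTestCase):
        def test_read_back_under_legacy_names(self):
            self.flags(driver=local_limit.UNIFIED_LIMITS_DRIVER,
                       group='quota')
            self.useFixture(limit_fixture.LimitFixture(
                {'server_key_pairs': 100}, {}))
            limits = local_limit.get_legacy_default_limits()
            self.assertEqual(100, limits['key_pairs'])
            # Unregistered resources fall back to 0.
            self.assertEqual(0, limits['metadata_items'])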

View File

@@ -228,50 +228,6 @@ class KeypairsTestV21(test.TestCase):
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_over_quota_during_recheck(self, mock_check):
# Simulate a race where the first check passes and the recheck fails.
# First check occurs in compute/api.
exc = exception.OverQuota(overs='key_pairs', usages={'key_pairs': 100})
mock_check.side_effect = [None, exc]
body = {
'keypair': {
'name': 'FAKE',
},
}
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
ctxt = self.req.environ['nova.context']
self.assertEqual(2, mock_check.call_count)
call1 = mock.call(ctxt, {'key_pairs': 1}, ctxt.user_id)
call2 = mock.call(ctxt, {'key_pairs': 0}, ctxt.user_id)
mock_check.assert_has_calls([call1, call2])
# Verify we removed the key pair that was added after the first
# quota check passed.
key_pairs = objects.KeyPairList.get_by_user(ctxt, ctxt.user_id)
names = [key_pair.name for key_pair in key_pairs]
self.assertNotIn('create_test', names)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_no_quota_recheck(self, mock_check):
# Disable recheck_quota.
self.flags(recheck_quota=False, group='quota')
body = {
'keypair': {
'name': 'create_test',
},
}
self.controller.create(self.req, body=body)
ctxt = self.req.environ['nova.context']
# check_deltas should have been called only once.
mock_check.assert_called_once_with(ctxt, {'key_pairs': 1},
ctxt.user_id)
def test_keypair_create_duplicate(self):
self.stub_out("nova.objects.KeyPair.create",
db_key_pair_create_duplicate)

View File

@@ -15,12 +15,15 @@
import mock
from oslo_config import cfg
from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
from nova import exception
from nova.limit import local as local_limit
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -116,14 +119,41 @@ class ServerGroupQuotasTestV21(test.TestCase):
self.controller.create,
self.req, body={'server_group': sgroup})
def _test_create_server_group_during_recheck(self, mock_method):
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
e = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
self.req, body={'server_group': sgroup})
self.assertEqual(2, mock_method.call_count)
return e
@mock.patch('nova.objects.Quotas.check_deltas')
def test_create_server_group_recheck_disabled(self, mock_check):
def test_create_server_group_during_recheck(self, mock_check):
"""Simulate a race where this request initially has enough quota to
progress partially through the create path but then fails the quota
recheck because a parallel request filled up the quota first.
"""
# First quota check succeeds, second (recheck) fails.
mock_check.side_effect = [None,
exception.OverQuota(overs='server_groups')]
e = self._test_create_server_group_during_recheck(mock_check)
expected = 'Quota exceeded, too many server groups.'
self.assertEqual(expected, str(e))
def _test_create_server_group_recheck_disabled(self):
self.flags(recheck_quota=False, group='quota')
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
self.controller.create(self.req, body={'server_group': sgroup})
@mock.patch('nova.objects.Quotas.check_deltas')
def test_create_server_group_recheck_disabled(self, mock_check):
self._test_create_server_group_recheck_disabled()
ctxt = self.req.environ['nova.context']
mock_check.assert_called_once_with(ctxt, {'server_groups': 1},
ctxt.project_id, ctxt.user_id)
@@ -170,3 +200,75 @@ class ServerGroupQuotasTestV21(test.TestCase):
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
def setUp(self):
super(ServerGroupQuotasUnifiedLimitsTestV21, self).setUp()
self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota')
self.req = fakes.HTTPRequest.blank('')
self.controller = sg_v21.ServerGroupController()
self.useFixture(limit_fixture.LimitFixture({'server_groups': 10}, {}))
@mock.patch('nova.limit.local.enforce_db_limit')
def test_create_server_group_during_recheck(self, mock_enforce):
"""Simulate a race where this request initially has enough quota to
progress partially through the create path but then fails the quota
recheck because a parallel request filled up the quota first.
"""
# First quota check succeeds, second (recheck) fails.
mock_enforce.side_effect = [
None,
exception.ServerGroupLimitExceeded(message='oslo.limit message')]
# Run the test using the unified limits enforce method.
e = self._test_create_server_group_during_recheck(mock_enforce)
expected = 'oslo.limit message'
self.assertEqual(expected, str(e))
@mock.patch('nova.limit.local.enforce_db_limit')
def test_create_server_group_recheck_disabled(self, mock_enforce):
# Run the test using the unified limits enforce method.
self._test_create_server_group_recheck_disabled()
ctxt = self.req.environ['nova.context']
mock_enforce.assert_called_once_with(ctxt, 'server_groups',
entity_scope=ctxt.project_id,
delta=1)
def test_create_group_fails_with_zero_quota(self):
self.useFixture(limit_fixture.LimitFixture({'server_groups': 0}, {}))
sgroup = {'name': 'test', 'policies': ['anti-affinity']}
exc = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
self.req, body={'server_group': sgroup})
msg = ("Resource %s is over limit" % local_limit.SERVER_GROUPS)
self.assertIn(msg, str(exc))
def test_create_only_one_group_when_limit_is_one(self):
self.useFixture(limit_fixture.LimitFixture({'server_groups': 1}, {}))
policies = ['anti-affinity']
sgroup = {'name': 'test', 'policies': policies}
res_dict = self.controller.create(
self.req, body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
# prove we can't create two, as limited to one
sgroup2 = {'name': 'test2', 'policies': policies}
exc = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
self.req, body={'server_group': sgroup2})
msg = ("Resource %s is over limit" % local_limit.SERVER_GROUPS)
self.assertIn(msg, str(exc))
# delete first one
self.controller.delete(self.req, res_dict['server_group']['id'])
# prove we can now create the second one
res_dict2 = self.controller.create(
self.req, body={'server_group': sgroup2})
self.assertEqual(res_dict2['server_group']['name'], 'test2')
self.assertTrue(
uuidutils.is_uuid_like(res_dict2['server_group']['id']))
self.assertEqual(res_dict2['server_group']['policies'], policies)

View File

@@ -17,10 +17,12 @@
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_limit import fixture as limit_fixture
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova.limit import local as local_limit
from nova.objects import keypair as keypair_obj
from nova import quota
from nova.tests.unit.compute import test_compute
@@ -119,7 +121,7 @@ class CreateImportSharedTestMixIn(object):
exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
name, *args)
self.assertEqual(expected_message, str(exc))
self.assertIn(expected_message, str(exc))
def assertInvalidKeypair(self, expected_message, name):
msg = 'Keypair data is invalid: %s' % expected_message
@@ -158,6 +160,48 @@ class CreateImportSharedTestMixIn(object):
msg = "Quota exceeded, too many key pairs."
self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
def _test_quota_during_recheck(self, mock_method, msg):
# Skip for import key pair due to bug 1959732.
if self.func_name == 'import_key_pair':
self.skipTest('bug/1959732: import_key_pair missing quota recheck')
self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
self.assertEqual(2, mock_method.call_count)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_quota_during_recheck(self, mock_check):
"""Simulate a race where this request initially has enough quota to
progress partially through the create path but then fails the quota
recheck because a parallel request filled up the quota first.
"""
# First quota check succeeds, second (recheck) fails.
mock_check.side_effect = [None,
exception.OverQuota(overs='key_pairs')]
msg = "Quota exceeded, too many key pairs."
self._test_quota_during_recheck(mock_check, msg)
def test_quota_unified_limits(self):
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
self.useFixture(limit_fixture.LimitFixture(
{'server_key_pairs': 0}, {}))
msg = ("Resource %s is over limit" % local_limit.KEY_PAIRS)
self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
@mock.patch('nova.limit.local.enforce_db_limit')
def test_quota_during_recheck_unified_limits(self, mock_enforce):
"""Simulate a race where this request initially has enough quota to
progress partially through the create path but then fails the quota
recheck because a parallel request filled up the quota first.
"""
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
self.useFixture(limit_fixture.LimitFixture(
{'server_key_pairs': 100}, {}))
# First quota check succeeds, second (recheck) fails.
mock_enforce.side_effect = [
None, exception.KeypairLimitExceeded('oslo.limit message')]
msg = 'oslo.limit message'
self._test_quota_during_recheck(mock_enforce, msg)
class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
func_name = 'create_key_pair'
@@ -192,6 +236,27 @@ class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
self.assertRaises(processutils.ProcessExecutionError,
self._check_success)
def test_success_unified_limits(self):
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
self.useFixture(limit_fixture.LimitFixture(
{'server_key_pairs': 1}, {}))
self._check_success()
@mock.patch('nova.objects.Quotas.check_deltas')
def test_quota_recheck_disabled(self, mock_check):
self.flags(recheck_quota=False, group="quota")
self._check_success()
self.assertEqual(1, mock_check.call_count)
@mock.patch('nova.limit.local.enforce_db_limit')
def test_quota_recheck_disabled_unified_limits(self, mock_enforce):
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
self.flags(recheck_quota=False, group="quota")
self.useFixture(limit_fixture.LimitFixture(
{'server_key_pairs': 1}, {}))
self._check_success()
self.assertEqual(1, mock_enforce.call_count)
class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
func_name = 'import_key_pair'
@@ -240,6 +305,27 @@ class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
msg = u'Keypair data is invalid: failed to generate fingerprint'
self.assertEqual(msg, str(exc))
def test_success_unified_limits(self):
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
self.useFixture(limit_fixture.LimitFixture(
{'server_key_pairs': 1}, {}))
self._check_success()
@mock.patch('nova.objects.Quotas.check_deltas')
def test_quota_recheck_disabled(self, mock_check):
self.flags(recheck_quota=False, group="quota")
self._check_success()
self.assertEqual(1, mock_check.call_count)
@mock.patch('nova.limit.local.enforce_db_limit')
def test_quota_recheck_disabled_unified_limits(self, mock_enforce):
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
self.flags(recheck_quota=False, group="quota")
self.useFixture(limit_fixture.LimitFixture(
{'server_key_pairs': 1}, {}))
self._check_success()
self.assertEqual(1, mock_enforce.call_count)
class GetKeypairTestCase(KeypairAPITestCase):
def test_success(self):

View File

@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
@@ -202,3 +204,53 @@ class TestLocalLimits(test.NoDBTestCase):
local_limit.enforce_db_limit,
self.context, local_limit.SERVER_GROUP_MEMBERS,
uuids.server_group, 11)
@mock.patch.object(objects.InstanceGroupList, "get_counts")
def test_get_in_use(self, mock_count):
mock_count.return_value = {'project': {'server_groups': 9}}
usages = local_limit.get_in_use(self.context, uuids.project_id)
expected_usages = {
'injected_file_content_bytes': 0,
'injected_file_path_bytes': 0,
'injected_files': 0,
'key_pairs': 0,
'metadata_items': 0,
'server_group_members': 0,
'server_groups': 9
}
self.assertEqual(expected_usages, usages)
class GetLegacyLimitsTest(test.NoDBTestCase):
def setUp(self):
super(GetLegacyLimitsTest, self).setUp()
self.new = {"server_metadata_items": 1,
"server_injected_files": 2,
"server_injected_file_content_bytes": 3,
"server_injected_file_path_bytes": 4,
"server_key_pairs": 5,
"server_groups": 6,
"server_group_members": 7}
self.legacy = {"metadata_items": 1,
"injected_files": 2,
"injected_file_content_bytes": 3,
"injected_file_path_bytes": 4,
"key_pairs": 5,
"server_groups": 6,
"server_group_members": 7}
self.resources = list(local_limit.API_LIMITS | local_limit.DB_LIMITS)
self.resources.sort()
self.flags(driver=local_limit.UNIFIED_LIMITS_DRIVER, group="quota")
def test_convert_keys_to_legacy_name(self):
limits = local_limit._convert_keys_to_legacy_name(self.new)
self.assertEqual(self.legacy, limits)
def test_get_legacy_default_limits(self):
reglimits = copy.deepcopy(self.new)
reglimits.pop('server_key_pairs')
self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
limits = local_limit.get_legacy_default_limits()
expected = copy.deepcopy(self.legacy)
expected['key_pairs'] = 0
self.assertEqual(expected, limits)

View File

@@ -17,12 +17,15 @@
import ddt
import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import api as compute
import nova.conf
from nova import context
from nova.db.main import models
from nova import exception
from nova.limit import local as local_limit
from nova import objects
from nova import quota
from nova import test
@@ -97,7 +100,7 @@ class QuotaIntegrationTestCase(test.TestCase):
# _instances_cores_ram_count().
inst_map = objects.InstanceMapping(
self.context, instance_uuid=inst.uuid, project_id=inst.project_id,
cell_mapping=cell1)
user_id=inst.user_id, cell_mapping=cell1)
inst_map.create()
return inst
@@ -205,6 +208,71 @@ class QuotaIntegrationTestCase(test.TestCase):
self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
def _test_with_server_group_members(self):
# use a known image uuid to avoid ImageNotFound errors
image_uuid = nova_fixtures.GlanceFixture.image4['id']
instance_group = objects.InstanceGroup(self.context,
policy="anti-affinity")
instance_group.name = "foo"
instance_group.project_id = self.context.project_id
instance_group.user_id = self.context.user_id
instance_group.uuid = uuids.instance_group
instance_group.create()
self.addCleanup(instance_group.destroy)
self.compute_api.create(
self.context, flavor=self.flavor,
image_href=image_uuid,
scheduler_hints={'group': uuids.instance_group},
check_server_group_quota=True)
exc = self.assertRaises(exception.OverQuota, self.compute_api.create,
self.context,
flavor=self.flavor,
image_href=image_uuid,
scheduler_hints={
'group': uuids.instance_group},
check_server_group_quota=True)
return exc
def test_with_server_group_members(self):
self.flags(server_group_members=1, group="quota")
exc = self._test_with_server_group_members()
self.assertEqual("Quota exceeded, too many servers in group", str(exc))
class UnifiedLimitsIntegrationTestCase(QuotaIntegrationTestCase):
"""Test that API and DB resources enforce properly with unified limits."""
def setUp(self):
super(UnifiedLimitsIntegrationTestCase, self).setUp()
self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
local_limit.INJECTED_FILES: 5,
local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
local_limit.INJECTED_FILES_PATH: 255,
local_limit.KEY_PAIRS: 100,
local_limit.SERVER_GROUPS: 10,
local_limit.SERVER_GROUP_MEMBERS: 10}
self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
def test_too_many_instances(self):
# TODO(johngarbutt) needs updating once we enforce resource limits
pass
def test_too_many_cores(self):
# TODO(johngarbutt) needs updating once we enforce resource limits
pass
def test_with_server_group_members(self):
self.useFixture(limit_fixture.LimitFixture(
{local_limit.SERVER_GROUP_MEMBERS: 1}, {}))
exc = self._test_with_server_group_members()
msg = ("Resource %s is over limit" % local_limit.SERVER_GROUP_MEMBERS)
self.assertIn(msg, str(exc))
@enginefacade.transaction_context_provider
class FakeContext(context.RequestContext):