Add caching of limits in Enforcer

This adds caching of resource limits for an Enforcer in order to
improve performance when repeated limit checks are needed. The cache
lasts the lifetime of an Enforcer and is enabled by default. It can be
disabled by passing cache=False when instantiating an Enforcer.

One usage pattern for a caching Enforcer would be to create an Enforcer
per service request so that the caching lives only as long as the
request.

Change-Id: I8e43dceec76aecd2b2ae23a137e56519efe29777
Author: melanie witt
Date: 2021-07-28 23:13:17 +00:00
parent caa75c1bab
commit 43683f543e
4 changed files with 86 additions and 11 deletions
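
A rough sketch of the per-request pattern described in the commit message. The
callback, handler, and resource name below are illustrative only (not part of
this change), and assume keystone and the [oslo_limit] config section are
already set up:

    from oslo_limit import limit

    def get_usages(project_id, resource_names):
        # A real service would count its own resource consumption here.
        return {name: 0 for name in resource_names}

    def handle_request(project_id):
        # One Enforcer per request: limits fetched from keystone are cached
        # only for the lifetime of this object, so they are re-fetched on
        # the next request. Pass cache=False to skip caching entirely.
        enforcer = limit.Enforcer(get_usages)
        enforcer.enforce(project_id, {'widgets': 1})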

oslo_limit/fixture.py

@@ -31,7 +31,7 @@ class LimitFixture(fixtures.Fixture):
                            As in reality, only per-project overrides need be
                            provided here; any unmentioned projects or
                            resources will take the registered limit defaults.
-        :type reglimits: dict
+        :type projlimits: dict
         """
         self.reglimits = reglimits
         self.projlimits = projlimits
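
For reference, a minimal sketch of how the fixture's two arguments are used in
a test (the test class is made up; the 'foo' resource and value 5 mirror the
new test added later in this commit):

    from oslotest import base

    from oslo_limit import fixture

    class TestWithLimits(base.BaseTestCase):
        def setUp(self):
            super().setUp()
            # reglimits: registered (default) limits, keyed by resource name.
            # projlimits: per-project overrides, keyed by project_id and then
            # resource name; unlisted projects fall back to the defaults.
            self.limits = self.useFixture(fixture.LimitFixture({'foo': 5}, {}))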

oslo_limit/limit.py

@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from collections import defaultdict
 from collections import namedtuple
 
 from keystoneauth1 import exceptions as ksa_exceptions
@@ -55,31 +56,34 @@ def _get_keystone_connection():
 class Enforcer(object):
 
-    def __init__(self, usage_callback):
+    def __init__(self, usage_callback, cache=True):
         """An object for checking usage against resource limits and requests.
 
         :param usage_callback: A callable function that accepts a project_id
                                string as a parameter and calculates the current
                                usage of a resource.
         :type usage_callback: callable function
+        :param cache: Whether to cache resource limits for the lifetime of this
+                      enforcer. Defaults to True.
+        :type cache: boolean
         """
         if not callable(usage_callback):
             msg = 'usage_callback must be a callable function.'
             raise ValueError(msg)
 
         self.connection = _get_keystone_connection()
-        self.model = self._get_model_impl(usage_callback)
+        self.model = self._get_model_impl(usage_callback, cache=cache)
 
     def _get_enforcement_model(self):
         """Query keystone for the configured enforcement model."""
         return self.connection.get('/limits/model').json()['model']['name']
 
-    def _get_model_impl(self, usage_callback):
+    def _get_model_impl(self, usage_callback, cache=True):
         """get the enforcement model based on configured model in keystone."""
         model = self._get_enforcement_model()
         for impl in _MODELS:
             if model == impl.name:
-                return impl(usage_callback)
+                return impl(usage_callback, cache=cache)
         raise ValueError("enforcement model %s is not supported" % model)
 
     def enforce(self, project_id, deltas):
@@ -167,16 +171,16 @@ class Enforcer(object):
         usage = self.model.get_project_usage(project_id, resources_to_check)
 
         return {resource: ProjectUsage(limit, usage[resource])
-                for resource, limit in dict(limits).items()}
+                for resource, limit in limits}
 
 
 class _FlatEnforcer(object):
 
     name = 'flat'
 
-    def __init__(self, usage_callback):
+    def __init__(self, usage_callback, cache=True):
         self._usage_callback = usage_callback
-        self._utils = _EnforcerUtils()
+        self._utils = _EnforcerUtils(cache=cache)
 
     def get_project_limits(self, project_id, resources_to_check):
         return self._utils.get_project_limits(project_id, resources_to_check)
@@ -202,7 +206,7 @@ class _StrictTwoLevelEnforcer(object):
 
     name = 'strict-two-level'
 
-    def __init__(self, usage_callback):
+    def __init__(self, usage_callback, cache=True):
         self._usage_callback = usage_callback
 
     def get_project_limits(self, project_id, resources_to_check):
@@ -229,8 +233,13 @@ class _LimitNotFound(Exception):
 class _EnforcerUtils(object):
     """Logic common used by multiple enforcers"""
 
-    def __init__(self):
+    def __init__(self, cache=True):
         self.connection = _get_keystone_connection()
+        self.should_cache = cache
+        # {project_id: {resource_name: project_limit}}
+        self.plimit_cache = defaultdict(dict)
+        # {resource_name: registered_limit}
+        self.rlimit_cache = {}
 
         # get and cache endpoint info
         endpoint_id = CONF.oslo_limit.endpoint_id
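
A quick aside on why plimit_cache is a defaultdict(dict) rather than a plain
dict: the per-project inner mapping is created on first write, so the
cache-fill code later in this change can assign without first checking whether
the project key exists. For example:

    from collections import defaultdict

    plimit_cache = defaultdict(dict)
    # No KeyError even though 'project-a' has never been seen; the inner
    # dict springs into existence on first access.
    plimit_cache['project-a']['cores'] = 20
    print(plimit_cache['project-a'])  # {'cores': 20}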
@@ -298,12 +307,32 @@ class _EnforcerUtils(object):
         return project_limits
 
     def _get_limit(self, project_id, resource_name):
-        # TODO(johngarbutt): might need to cache here
+        # If we are configured to cache limits, look in the cache first and
+        # use the cached value if there is one. Else, retrieve the limit and
+        # add it to the cache. Do this for both project limits and registered
+        # limits.
+
+        # Look for a project limit first.
+        if (project_id in self.plimit_cache and
+                resource_name in self.plimit_cache[project_id]):
+            return self.plimit_cache[project_id][resource_name].resource_limit
+
         project_limit = self._get_project_limit(project_id, resource_name)
+        if self.should_cache and project_limit:
+            self.plimit_cache[project_id][resource_name] = project_limit
+
         if project_limit:
             return project_limit.resource_limit
 
+        # If there is no project limit, look for a registered limit.
+        if resource_name in self.rlimit_cache:
+            return self.rlimit_cache[resource_name].default_limit
+
         registered_limit = self._get_registered_limit(resource_name)
+        if self.should_cache and registered_limit:
+            self.rlimit_cache[resource_name] = registered_limit
+
         if registered_limit:
             return registered_limit.default_limit
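
Note the design choice: only truthy results are ever stored, so a missing
limit is never negatively cached and will be re-queried from keystone on every
call. The shape is a plain look-aside cache; a minimal standalone sketch of
the same pattern, with a hypothetical fetch callable standing in for the
keystone queries:

    _cache = {}

    def lookup(key, fetch, should_cache=True):
        if key in _cache:
            return _cache[key]       # hit: skip the keystone round trip
        value = fetch(key)
        if should_cache and value:   # falsy results are never stored, so
            _cache[key] = value      # "no limit set" is re-fetched each time
        return value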

oslo_limit/tests/test_limit.py

@@ -29,6 +29,7 @@ from oslo_config import fixture as config_fixture
 from oslotest import base
 
 from oslo_limit import exception
+from oslo_limit import fixture
 from oslo_limit import limit
 from oslo_limit import opts
@@ -309,3 +310,41 @@ class TestEnforcerUtils(base.BaseTestCase):
         self.assertEqual(0, over_d.limit)
         self.assertEqual(0, over_d.current_usage)
         self.assertEqual(0, over_d.delta)
+
+    def test_get_limit_cache(self, cache=True):
+        # No project limit and registered limit = 5
+        fix = self.useFixture(fixture.LimitFixture({'foo': 5}, {}))
+        project_id = uuid.uuid4().hex
+        utils = limit._EnforcerUtils(cache=cache)
+
+        foo_limit = utils._get_limit(project_id, 'foo')
+        self.assertEqual(5, foo_limit)
+        self.assertEqual(1, fix.mock_conn.registered_limits.call_count)
+
+        # Second call should be cached, so call_count for registered limits
+        # should remain 1. When cache is disabled, it should increase to 2.
+        foo_limit = utils._get_limit(project_id, 'foo')
+        self.assertEqual(5, foo_limit)
+        count = 1 if cache else 2
+        self.assertEqual(count, fix.mock_conn.registered_limits.call_count)
+
+        # Add a project limit = 1
+        fix.projlimits[project_id] = {'foo': 1}
+        foo_limit = utils._get_limit(project_id, 'foo')
+        self.assertEqual(1, foo_limit)
+        # Project limits should have been queried 3 times total, once per
+        # _get_limit call.
+        self.assertEqual(3, fix.mock_conn.limits.call_count)
+
+        # Fourth call should be cached, so call_count for project limits
+        # should remain 3. When cache is disabled, it should increase to 4.
+        foo_limit = utils._get_limit(project_id, 'foo')
+        self.assertEqual(1, foo_limit)
+        count = 3 if cache else 4
+        self.assertEqual(count, fix.mock_conn.limits.call_count)
+
+    def test_get_limit_no_cache(self):
+        self.test_get_limit_cache(cache=False)
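
Note how the two tests share one body: the test runner invokes
test_get_limit_cache with no arguments, so the cache=True default exercises
the caching path, while test_get_limit_no_cache re-invokes the same body
directly with cache=False to exercise the bypass.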

releasenotes/notes/… (new file)

@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    ``Enforcer`` objects now cache limits by default for the lifetime of the
+    object to provide improved performance when multiple calls of ``enforce()``
+    are needed. This behavior is controlled by the boolean ``cache`` keyword
+    argument to the ``__init__`` method.