Use oslo_cache in auth_token middleware

Use the new oslo.cache library instead of using memcached directly.
This keeps the old options around and will continue to use those in
preference to the new oslo.cache options, as there is no way to tell
whether oslo.cache was explicitly configured and should take precedence.

The old options are intentionally not yet deprecated (no deprecation
messages are emitted); deprecation will follow once the change has been
exercised in production environments.

Closes-Bug: #1523375
Change-Id: Ifccacc5db311ad538ce60191cbe221644d1a5807
Co-Authored-By: Nicolas Helgeson <nh202b@att.com>
This commit is contained in:
Jamie Lennox 2016-01-17 00:34:02 +11:00 committed by Jaewoo Park
parent 4c08d725c2
commit 9d8e2836fe
6 changed files with 33 additions and 189 deletions

View File

@ -225,6 +225,8 @@ from keystoneauth1 import loading
from keystoneauth1.loading import session as session_loading
from keystoneclient.common import cms
from keystoneclient import exceptions as ksc_exceptions
import oslo_cache
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import webob.dec
@ -245,6 +247,7 @@ from keystonemiddleware.i18n import _
_LOG = logging.getLogger(__name__)
_CACHE_INVALID_INDICATOR = 'invalid'
oslo_cache.configure(cfg.CONF)
AUTH_TOKEN_OPTS = [
@ -952,7 +955,15 @@ class AuthProtocol(BaseAuthProtocol):
include_service_catalog=self._include_service_catalog,
requested_auth_version=auth_version)
def _create_oslo_cache(self):
    """Build and return a configured oslo.cache region.

    Kept as its own method so tests can mock it out and substitute an
    in-memory cache implementation.
    """
    cache_region = oslo_cache.create_region()
    oslo_cache.configure_cache_region(cfg.CONF, cache_region)
    return cache_region
def _token_cache_factory(self):
security_strategy = self._conf.get('memcache_security_strategy')
cache_kwargs = dict(

View File

@ -13,13 +13,13 @@
import contextlib
import hashlib
from oslo_cache import _memcache_pool as memcache_pool
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from keystonemiddleware.auth_token import _exceptions as exc
from keystonemiddleware.auth_token import _memcache_crypt as memcache_crypt
from keystonemiddleware.auth_token import _memcache_pool as memcache_pool
from keystonemiddleware.i18n import _

View File

@ -1,186 +0,0 @@
# Copyright 2014 Mirantis Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Thread-safe connection pool for python-memcached."""
# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
# and should be kept in sync until we can use external library for this.
import collections
import contextlib
import itertools
import time
from oslo_log import log as logging
from six.moves import queue
# A pooled connection paired with the wall-clock time (ttl) after which it
# counts as idle-expired and becomes eligible for reaping.
_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])


class ConnectionGetTimeoutException(Exception):
    """Raised when no pooled connection becomes available within the timeout."""
    pass
class ConnectionPool(queue.Queue):
    """Base connection pool class.

    This class implements the basic connection pool logic as an abstract base
    class.
    """

    def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
        """Initialize the connection pool.

        :param maxsize: maximum number of client connections for the pool
        :type maxsize: int
        :param unused_timeout: idle time to live for unused clients (in
                               seconds). If a client connection object has been
                               in the pool and idle for longer than the
                               unused_timeout, it will be reaped. This is to
                               ensure resources are released as utilization
                               goes down.
        :type unused_timeout: int
        :param conn_get_timeout: maximum time in seconds to wait for a
                                 connection. If set to `None` timeout is
                                 indefinite.
        :type conn_get_timeout: int
        """
        # Direct parent-class call rather than super(): queue.Queue is an
        # old-style class on some interpreters this code supports.
        queue.Queue.__init__(self, maxsize)
        self._unused_timeout = unused_timeout
        self._connection_get_timeout = conn_get_timeout
        # Number of connections currently checked out of the pool; used by
        # _qsize below so capacity accounting includes in-use connections.
        self._acquired = 0
        self._LOG = logging.getLogger(__name__)

    def _create_connection(self):
        # Subclasses must build and return a new raw connection object.
        raise NotImplementedError

    def _destroy_connection(self, conn):
        # Subclasses must release any resources held by conn.
        raise NotImplementedError

    @contextlib.contextmanager
    def acquire(self):
        """Check a connection out of the pool for the duration of the
        with-block, returning it to the pool even if the body raises.

        :raises ConnectionGetTimeoutException: if no connection becomes
            available within ``conn_get_timeout`` seconds.
        """
        try:
            conn = self.get(timeout=self._connection_get_timeout)
        except queue.Empty:
            self._LOG.critical('Unable to get a connection from pool id '
                               '%(id)s after %(seconds)s seconds.',
                               {'id': id(self),
                                'seconds': self._connection_get_timeout})
            raise ConnectionGetTimeoutException()
        try:
            yield conn
        finally:
            self.put(conn)

    def _qsize(self):
        # Report remaining capacity, not queued items: connections that are
        # checked out still count against maxsize so Queue.put/get block
        # correctly while clients hold connections.
        return self.maxsize - self._acquired

    # Some Queue implementations name this hook qsize rather than _qsize;
    # alias it so the capacity accounting above is used either way.
    if not hasattr(queue.Queue, '_qsize'):
        qsize = _qsize

    def _get(self):
        # Reuse the most recently returned connection (LIFO via pop()) or
        # lazily create a new one when the pool is empty.
        if self.queue:
            conn = self.queue.pop().connection
        else:
            conn = self._create_connection()
        self._acquired += 1
        return conn

    def _put(self, conn):
        # Returned connections go to the right end with a fresh idle ttl.
        self.queue.append(_PoolItem(
            ttl=time.time() + self._unused_timeout,
            connection=conn,
        ))
        self._acquired -= 1
        # Drop all expired connections from the left (oldest) end of the
        # queue; entries are appended in ttl order so we can stop at the
        # first unexpired one.
        now = time.time()
        while self.queue and self.queue[0].ttl < now:
            conn = self.queue.popleft().connection
            self._destroy_connection(conn)
class MemcacheClientPool(ConnectionPool):
    """A ConnectionPool of python-memcached clients.

    Shares knowledge of dead memcached hosts between pooled clients
    (via ``_hosts_deaduntil``) so each client does not have to rediscover
    a dead server on its own.
    """

    def __init__(self, urls, dead_retry=None, socket_timeout=None, **kwargs):
        # urls: memcached server addresses shared by every pooled client;
        # remaining kwargs are forwarded to ConnectionPool.__init__.
        ConnectionPool.__init__(self, **kwargs)
        self._urls = urls
        self._dead_retry = dead_retry
        self._socket_timeout = socket_timeout
        # NOTE(morganfainberg): The host objects expect an int for the
        # deaduntil value. Initialize this at 0 for each host with 0 indicating
        # the host is not dead.
        self._hosts_deaduntil = [0] * len(urls)

        # NOTE(morganfainberg): Lazy import to allow middleware to work with
        # python 3k even if memcache will not due to python 3k
        # incompatibilities within the python-memcache library.
        global memcache
        import memcache

        # This 'class' is taken from http://stackoverflow.com/a/22520633/238308
        # Don't inherit client from threading.local so that we can reuse
        # clients in different threads
        MemcacheClient = type('_MemcacheClient', (object,),
                              dict(memcache.Client.__dict__))

        self._memcache_client_class = MemcacheClient

    def _create_connection(self):
        # Build a new client bound to the configured servers and timeouts.
        return self._memcache_client_class(self._urls,
                                           dead_retry=self._dead_retry,
                                           socket_timeout=self._socket_timeout)

    def _destroy_connection(self, conn):
        # Close every server socket held by the client before discarding it.
        conn.disconnect_all()

    def _get(self):
        conn = ConnectionPool._get(self)
        try:
            # Propagate host state known to us to this client's list
            now = time.time()
            for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
                if deaduntil > now and host.deaduntil <= now:
                    host.mark_dead('propagating death mark from the pool')
                    host.deaduntil = deaduntil
        except Exception:
            # We need to be sure that connection doesn't leak from the pool.
            # This code runs before we enter context manager's try-finally
            # block, so we need to explicitly release it here
            ConnectionPool._put(self, conn)
            raise
        return conn

    def _put(self, conn):
        try:
            # If this client found that one of the hosts is dead, mark it as
            # such in our internal list
            now = time.time()
            for i, deaduntil, host in zip(itertools.count(),
                                          self._hosts_deaduntil,
                                          conn.servers):
                # Do nothing if we already know this host is dead
                if deaduntil <= now:
                    if host.deaduntil > now:
                        self._hosts_deaduntil[i] = host.deaduntil
                    else:
                        self._hosts_deaduntil[i] = 0

            # If all hosts are dead we should forget that they're dead. This
            # way we won't get completely shut off until dead_retry seconds
            # pass, but will be checking servers as frequent as we can (over
            # way smaller socket_timeout)
            if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
                self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
        finally:
            # Always return the connection, even if state bookkeeping failed.
            ConnectionPool._put(self, conn)

View File

@ -28,6 +28,7 @@ from keystoneauth1 import session
from keystoneclient.common import cms
from keystoneclient import exceptions as ksc_exceptions
import mock
import oslo_cache
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
@ -252,6 +253,17 @@ class v3CompositeFakeApp(CompositeBase, v3FakeApp):
v3_default_service_env_additions)
class FakeOsloCache(_cache._FakeClient):
    """A fake oslo_cache object.

    The memcache and oslo_cache interfaces are almost the same except we need
    to return NO_VALUE when not found.
    """

    def get(self, key):
        # Map the memcache-style falsy "miss" result onto the oslo.cache
        # NO_VALUE sentinel that callers expect.
        cached = super(FakeOsloCache, self).get(key)
        return cached or oslo_cache.NO_VALUE
class BaseAuthTokenMiddlewareTest(base.BaseAuthTokenTestCase):
"""Base test class for auth_token middleware.
@ -270,6 +282,12 @@ class BaseAuthTokenMiddlewareTest(base.BaseAuthTokenTestCase):
super(BaseAuthTokenMiddlewareTest, self).setUp()
self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# the default oslo_cache is null cache, always use an in-mem cache
self.useFixture(fixtures.MockPatchObject(auth_token.AuthProtocol,
'_create_oslo_cache',
return_value=FakeOsloCache()))
self.expected_env = expected_env or dict()
self.fake_app = fake_app or FakeApp
self.middleware = None

View File

@ -13,11 +13,11 @@
import time
import mock
from oslo_cache import _memcache_pool
from six.moves import queue
import testtools
from testtools import matchers
from keystonemiddleware.auth_token import _memcache_pool
from keystonemiddleware.tests.unit import utils
@ -109,7 +109,7 @@ class TestConnectionPool(utils.TestCase):
# Make sure we've consumed the only available connection from the pool
conn = connection_pool.get_nowait()
self.assertRaises(_memcache_pool.ConnectionGetTimeoutException,
self.assertRaises(_memcache_pool.exception.QueueEmpty,
_acquire_connection)
# Put the connection back and ensure we can acquire the connection

View File

@ -3,6 +3,7 @@
# process, which may cause wedges in the gate later.
keystoneauth1>=3.2.0 # Apache-2.0
oslo.cache>=1.26.0 # Apache-2.0
oslo.config>=4.6.0 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0