Update dogpile cache with an async worker

This commit changes how we refresh our cache from doing it directly on
stale requests to do it async in the background with a worker thread.
This leverages dogpile.cache's async_creation_runner mechanism to start
a background thread when the cache goes stale that will update the cache
while still returning the cached copy in the meantime.

To enable this you need to use a dogpile.cache backend that supports
using a distributed_lock. This does not include the default dbm backend;
using a memcached-based backend, such as dogpile.cache.pylibmc, is
recommended for enabling this functionality.

Change-Id: I0fd29839c72ca2fdfb4c4724bb3da7e283e3d27d
This commit is contained in:
Matthew Treinish 2016-05-25 15:39:34 -04:00
parent 0d902942ca
commit a839a02be1
No known key found for this signature in database
GPG Key ID: FD12A0F214C9E177
2 changed files with 42 additions and 9 deletions

View File

@ -141,7 +141,7 @@ interactive response times we cache the api response from requests using
elasticsearch data. Note, that this caching is enabled regardless of whether
elastic-recheck is enabled or not.
There are three configuration options available around configuring caching.
There are four configuration options available around configuring caching.
While the defaults were picked to work in most situations depending on your
specific deployment specifics there are other choices that might make more
sense.
@ -161,6 +161,16 @@ The third option is `cache_file` which is used to set the file path when
the DBM backend is used. By default this is configured to use
TEMPDIR/openstack-health.dbm
The fourth option is `cache_url` which is used to provide the url to an external
service, like memcached, for storing the cache data. This only needs to be set
if you're using a backend that requires this.
It also should be noted that when configuring caching using a non-default
backend the API server will attempt to configure refreshing the cache
asynchronously with a background thread. This makes the end user response near
instantaneous in all cases because the cache is updated in the background
instead of on an incoming request.
Frontend
--------
The production application can be built using::

View File

@ -119,7 +119,7 @@ def setup():
except ConfigParser.Error:
backend = 'dogpile.cache.dbm'
try:
expire = config.get('default', 'cache_expiration')
expire = config.getint('default', 'cache_expiration')
except ConfigParser.Error:
expire = datetime.timedelta(minutes=30)
try:
@ -127,15 +127,20 @@ def setup():
except ConfigParser.Error:
cache_file = os.path.join(tempfile.gettempdir(),
'openstack-health.dbm')
cache_url = _config_get(config.get, 'default', 'cache_url', None)
global region
if backend == 'dogpile.cache.dbm':
args = {'filename': cache_file}
region = dogpile.cache.make_region().configure(
backend, expiration_time=expire, arguments=args)
else:
args = {}
region = dogpile.cache.make_region().configure(backend,
expiration_time=expire,
arguments=args)
args = {'distributed_lock': True}
if cache_url:
args['url'] = cache_url
region = dogpile.cache.make_region(
async_creation_runner=_periodic_refresh_cache).configure(
backend, expiration_time=expire, arguments=args)
def get_session():
@ -462,12 +467,15 @@ def get_recent_failed_runs_rss(run_metadata_key, value):
@app.route('/tests/recent/<string:status>', methods=['GET'])
def get_recent_test_status(status):
def get_recent_test_status(status, num_runs=None):
global region
if not region:
setup()
status = parse.unquote(status)
num_runs = flask.request.args.get('num_runs', 10)
try:
num_runs = flask.request.args.get('num_runs', 10)
except RuntimeError:
num_runs = num_runs or 10
bug_dict = {}
query_threads = []
@ -505,7 +513,22 @@ def get_recent_test_status(status):
for thread in query_threads:
thread.join()
return {'test_runs': output, 'bugs': bug_dict}
return jsonify(_get_recent(status))
results = _get_recent(status)
try:
return jsonify(results)
except RuntimeError:
return results
def _periodic_refresh_cache(cache, status, creator, mutex):
def runner():
try:
value = creator()
cache.set(status, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
@app.route('/run/<string:run_id>/tests', methods=['GET'])