Introduce centralized database driver for image cache
Added a centralized_db driver for the image cache, which uses a centralized database for storing cache-related information. Related blueprint: centralized-cache-db Change-Id: Iafaaa86666176cc95f77d85a4ab77286f0042bdd
This commit is contained in:
parent
10d663d04d
commit
0bbaec2eeb
|
@ -33,7 +33,7 @@ LOG = logging.getLogger(__name__)
|
|||
|
||||
image_cache_opts = [
|
||||
cfg.StrOpt('image_cache_driver', default='sqlite',
|
||||
choices=('sqlite', 'xattr'), ignore_case=True,
|
||||
choices=('centralized_db', 'sqlite', 'xattr'), ignore_case=True,
|
||||
help=_("""
|
||||
The driver to use for image cache management.
|
||||
|
||||
|
@ -55,6 +55,8 @@ store the information about cached images:
|
|||
* The ``xattr`` driver uses the extended attributes of files to store this
|
||||
information. It also requires a filesystem that sets ``atime`` on the files
|
||||
when accessed.
|
||||
* The ``centralized_db`` driver uses a central database (which will be common
|
||||
for all glance nodes) to track the usage of cached images.
|
||||
|
||||
Deprecation warning:
|
||||
* As centralized database will now be used for image cache management, the
|
||||
|
@ -62,6 +64,7 @@ Deprecation warning:
|
|||
development cycle.
|
||||
|
||||
Possible values:
|
||||
* centralized_db
|
||||
* sqlite
|
||||
* xattr
|
||||
|
||||
|
|
|
@ -0,0 +1,388 @@
|
|||
# Copyright 2024 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Cache driver that uses Centralized database of glance to store information
|
||||
about cached images
|
||||
"""
|
||||
from contextlib import contextmanager
|
||||
import os
|
||||
import stat
|
||||
import time
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import fileutils
|
||||
|
||||
from glance.common import exception
|
||||
from glance import context
|
||||
import glance.db
|
||||
from glance.i18n import _LI, _LW
|
||||
from glance.image_cache.drivers import base
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class Driver(base.Driver):

    """
    Cache driver that records cache bookkeeping (cached entries, sizes,
    hit counts) in the centralized glance database so the information is
    shared between all glance nodes. The image data itself is still
    stored as regular files under the cache directory; each node's
    entries are keyed by ``worker_self_reference_url``.

    (Fixed: the previous docstring was copied from the xattr driver and
    wrongly claimed this driver uses xattr file tags and atimes.)
    """

    def __init__(self):
        # Use an admin context so database calls are never limited by a
        # caller's scope.
        self.context = context.get_admin_context()
        self.db_api = glance.db.get_api()

    def configure(self):
        """
        Configure the driver to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadDriverConfiguration`
        """
        super(Driver, self).configure()
        lockutils.set_defaults(self.base_dir)

        # NOTE(abhishekk): Record the node reference in the database for
        # future use.
        node_reference_url = CONF.worker_self_reference_url
        if node_reference_url:
            try:
                self.db_api.node_reference_create(
                    self.context, node_reference_url)
            except exception.Duplicate:
                LOG.debug("Node reference is already recorded, ignoring it")

    def get_cache_size(self):
        """
        Returns the total size in bytes of the image cache.
        """
        return sum(os.stat(path)[stat.ST_SIZE]
                   for path in self.get_cache_files(self.base_dir))

    def get_hit_count(self, image_id):
        """
        Return the number of hits that an image has.

        :param image_id: Opaque image identifier
        """
        if not self.is_cached(image_id):
            return 0

        node_reference_url = CONF.worker_self_reference_url
        return self.db_api.get_hit_count(self.context, image_id,
                                         node_reference_url)

    def get_cached_images(self):
        """
        Returns a list of records about cached images.
        """
        LOG.debug("Gathering cached image entries.")
        node_reference_url = CONF.worker_self_reference_url
        return self.db_api.get_cached_images(
            self.context, node_reference_url)

    def is_cached(self, image_id):
        """
        Returns True if the image with the supplied ID has its image
        file cached.

        :param image_id: Image ID
        """
        return os.path.exists(self.get_image_filepath(image_id))

    def is_cacheable(self, image_id):
        """
        Returns True if the image with the supplied ID can have its
        image file cached, False otherwise.

        :param image_id: Image ID
        """
        # Make sure we're not already cached or caching the image
        return not (self.is_cached(image_id) or
                    self.is_being_cached(image_id))

    def is_being_cached(self, image_id):
        """
        Returns True if the image with supplied id is currently
        in the process of having its image file cached.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'incomplete')
        return os.path.exists(path)

    def is_queued(self, image_id):
        """
        Returns True if the image identifier is in our cache queue.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'queue')
        return os.path.exists(path)

    def delete_all_cached_images(self):
        """
        Removes all cached image files and any attributes about the images

        :returns: the number of image files deleted from disk
        """
        deleted = 0
        for path in self.get_cache_files(self.base_dir):
            delete_cached_file(path)
            deleted += 1

        node_reference_url = CONF.worker_self_reference_url
        self.db_api.delete_all_cached_images(
            self.context, node_reference_url)

        return deleted

    def delete_cached_image(self, image_id):
        """
        Removes a specific cached image file and any attributes about the image

        :param image_id: Image ID
        """
        node_reference_url = CONF.worker_self_reference_url
        path = self.get_image_filepath(image_id)
        delete_cached_file(path)
        self.db_api.delete_cached_image(
            self.context, image_id, node_reference_url)

    def delete_all_queued_images(self):
        """
        Removes all queued image files and any attributes about the images

        :returns: the number of queue files deleted
        """
        files_deleted = 0
        for path in self.get_cache_files(self.queue_dir):
            fileutils.delete_if_exists(path)
            files_deleted += 1
        return files_deleted

    def delete_queued_image(self, image_id):
        """
        Removes a specific queued image file and any attributes about the image

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'queue')
        fileutils.delete_if_exists(path)

    def clean(self, stall_time=None):
        """
        Delete any image files in the invalid directory and any
        files in the incomplete directory that are older than a
        configurable amount of time.

        :param stall_time: maximum age in seconds for incomplete files;
                           defaults to CONF.image_cache_stall_time
        """
        self.delete_invalid_files()

        if stall_time is None:
            stall_time = CONF.image_cache_stall_time

        now = time.time()
        older_than = now - stall_time
        self.delete_stalled_files(older_than)

    def get_least_recently_accessed(self):
        """
        Return a tuple containing the image_id and size of the least recently
        accessed cached file, or None if no cached files.
        """
        node_reference_url = CONF.worker_self_reference_url
        image_id = self.db_api.get_least_recently_accessed(
            self.context, node_reference_url)

        path = self.get_image_filepath(image_id)
        try:
            file_info = os.stat(path)
            size = file_info[stat.ST_SIZE]
        except OSError:
            # The DB record may outlive the file on disk; report zero.
            size = 0
        return image_id, size

    @contextmanager
    def open_for_write(self, image_id):
        """
        Open a file for writing the image file for an image
        with supplied identifier.

        On success the file is moved into place and recorded in the
        central database; on failure (or an interrupted fetch) it is
        moved to the invalid directory and any DB record is removed.

        :param image_id: Image ID
        """
        incomplete_path = self.get_image_filepath(image_id, 'incomplete')
        node_reference_url = CONF.worker_self_reference_url

        def commit():
            final_path = self.get_image_filepath(image_id)
            LOG.debug("Fetch finished, moving "
                      "'%(incomplete_path)s' to '%(final_path)s'",
                      dict(incomplete_path=incomplete_path,
                           final_path=final_path))
            os.rename(incomplete_path, final_path)

            # Make sure that we "pop" the image from the queue...
            if self.is_queued(image_id):
                fileutils.delete_if_exists(
                    self.get_image_filepath(image_id, 'queue'))

            file_size = os.path.getsize(final_path)

            self.db_api.insert_cache_details(
                self.context, node_reference_url, image_id, file_size)
            LOG.debug("Image cached successfully.")

        def rollback(e):
            if os.path.exists(incomplete_path):
                invalid_path = self.get_image_filepath(image_id, 'invalid')

                # Pass the message and its arguments to LOG.warning
                # separately so they are interpolated (previously a
                # (msg, args) tuple was logged verbatim).
                LOG.warning(_LW("Fetch of cache file failed (%(e)s), rolling "
                                "back by moving '%(incomplete_path)s' to "
                                "'%(invalid_path)s'"),
                            {'e': e,
                             'incomplete_path': incomplete_path,
                             'invalid_path': invalid_path})
                os.rename(incomplete_path, invalid_path)

            self.db_api.delete_cached_image(
                self.context, image_id, node_reference_url)

        try:
            with open(incomplete_path, 'wb') as cache_file:
                yield cache_file
        except Exception as e:
            with excutils.save_and_reraise_exception():
                rollback(e)
        else:
            commit()
        finally:
            # if the generator filling the cache file neither raises an
            # exception, nor completes fetching all data, neither rollback
            # nor commit will have been called, so the incomplete file
            # will persist - in that case remove it as it is unusable
            # example: ^c from client fetch
            if os.path.exists(incomplete_path):
                rollback('incomplete fetch')

    @contextmanager
    def open_for_read(self, image_id):
        """
        Open and yield file for reading the image file for an image
        with supplied identifier.

        Each read bumps the image's hit count in the central database.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id)
        try:
            with open(path, 'rb') as cache_file:
                yield cache_file
        finally:
            node_reference_url = CONF.worker_self_reference_url
            self.db_api.update_hit_count(
                self.context, image_id, node_reference_url)

    def queue_image(self, image_id):
        """
        This adds a image to be cache to the queue.

        If the image already exists in the queue or has already been
        cached, we return False, True otherwise

        :param image_id: Image ID
        """
        if self.is_cached(image_id):
            LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id)
            return False

        if self.is_being_cached(image_id):
            LOG.info(_LI("Not queueing image '%s'. Already being "
                         "written to cache"), image_id)
            return False

        if self.is_queued(image_id):
            LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id)
            return False

        path = self.get_image_filepath(image_id, 'queue')

        # Touch the file to add it to the queue
        with open(path, "w"):
            pass

        return True

    def delete_invalid_files(self):
        """
        Removes any invalid cache entries
        """
        for path in self.get_cache_files(self.invalid_dir):
            fileutils.delete_if_exists(path)
            LOG.info(_LI("Removed invalid cache file %s"), path)

    def delete_stalled_files(self, older_than):
        """
        Removes any incomplete cache entries older than a
        supplied modified time.

        :param older_than: Files written to on or before this timestamp
                           will be deleted.
        """
        for path in self.get_cache_files(self.incomplete_dir):
            if os.path.getmtime(path) < older_than:
                try:
                    fileutils.delete_if_exists(path)
                    LOG.info(_LI("Removed stalled cache file %s"), path)
                except Exception as e:
                    # Pass args separately so the message is interpolated
                    # (previously a (msg, args) tuple was logged verbatim).
                    LOG.warning(_LW("Failed to delete file %(path)s. "
                                    "Got error: %(e)s"),
                                dict(path=path, e=e))

    def get_queued_images(self):
        """
        Returns a list of image IDs that are in the queue. The
        list should be sorted by the time the image ID was inserted
        into the queue.
        """
        items = []
        for path in self.get_cache_files(self.queue_dir):
            mtime = os.path.getmtime(path)
            items.append((mtime, os.path.basename(path)))

        items.sort()
        return [image_id for (modtime, image_id) in items]

    def get_cache_files(self, basepath):
        """
        Returns cache files in the supplied directory

        :param basepath: Directory to look in for cache files
        """
        for fname in os.listdir(basepath):
            path = os.path.join(basepath, fname)
            if os.path.isfile(path) and not path.endswith(".db"):
                yield path
|
||||
|
||||
|
||||
def delete_cached_file(path):
    """Remove the cached image file at ``path``, ignoring a missing file."""
    LOG.debug("Deleting image cache file '%s'", path)
    fileutils.delete_if_exists(path)
|
|
@ -0,0 +1,555 @@
|
|||
# Copyright 2024 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import datetime
|
||||
import errno
|
||||
import io
|
||||
import os
|
||||
import time
|
||||
from unittest import mock
|
||||
|
||||
from oslo_utils import fileutils
|
||||
|
||||
from glance.image_cache.drivers import centralized_db
|
||||
from glance.tests import functional
|
||||
|
||||
|
||||
DATA = b'IMAGEDATA'
|
||||
|
||||
|
||||
class TestCentralizedDb(functional.SynchronousAPIBase):
|
||||
# ToDo(abhishekk): Once system scope is enabled and RBAC is fully
|
||||
# supported, enable these tests for RBAC as well
|
||||
def setUp(self):
    """Delegate to the functional test base class setup."""
    super().setUp()
|
||||
|
||||
def start_server(self, enable_cache=True, set_worker_url=True):
    """Start the API server configured with the centralized_db driver.

    :param enable_cache: whether image caching is enabled
    :param set_worker_url: when True, set this node's
        worker_self_reference_url before starting
    """
    if set_worker_url:
        self.config(worker_self_reference_url='http://workerx')
    self.config(image_cache_driver='centralized_db')

    super().start_server(enable_cache=enable_cache)
|
||||
|
||||
def load_data(self):
    """Create one queued image plus uploaded public/private images.

    :returns: dict mapping 'queued', 'public' and 'private' to the
        corresponding image ids
    """
    output = {}
    path = "/v2/images"

    # An image with no uploaded data stays in the 'queued' state.
    response = self.api_post(path,
                             json={'name': 'queued-image',
                                   'container_format': 'bare',
                                   'disk_format': 'raw'})
    self.assertEqual(201, response.status_code)
    output['queued'] = response.json['id']

    for visibility in ('public', 'private'):
        body = {
            'name': '%s-image' % visibility,
            'visibility': visibility,
            'container_format': 'bare',
            'disk_format': 'raw'
        }
        response = self.api_post(path, json=body)
        self.assertEqual(201, response.status_code)
        image_id = response.json['id']
        # Upload a payload so the image becomes active (and cacheable).
        response = self.api_put(
            '/v2/images/%s/file' % image_id,
            headers={'Content-Type': 'application/octet-stream'},
            data=DATA)
        self.assertEqual(204, response.status_code)
        output[visibility] = image_id

    return output
|
||||
|
||||
def wait_for_caching(self, image_id, max_sec=10, delay_sec=0.2,
                     start_delay_sec=None):
    """Poll the cache listing until ``image_id`` appears as cached.

    :param image_id: image to wait for
    :param max_sec: overall timeout in seconds
    :param delay_sec: pause between polls
    :param start_delay_sec: optional initial pause before polling
    :raises Exception: if the image is not cached within max_sec
    """
    start_time = time.time()
    done_time = start_time + max_sec
    if start_delay_sec:
        time.sleep(start_delay_sec)
    while time.time() <= done_time:
        cached = [image['image_id']
                  for image in self.list_cache()['cached_images']]
        if image_id in cached:
            return
        time.sleep(delay_sec)

    # Fixed grammar: message previously read "failed to cached".
    msg = "Image {0} failed to be cached within {1} sec"
    raise Exception(msg.format(image_id, max_sec))
|
||||
|
||||
def list_cache(self, expected_code=200):
    """GET /v2/cache, assert the status code and return the JSON body.

    Returns None when a non-200 response was expected.
    """
    response = self.api_get('/v2/cache')
    self.assertEqual(expected_code, response.status_code)
    if response.status_code == 200:
        return response.json
|
||||
|
||||
def test_centralized_db_worker_url_not_set(self):
    """Startup must fail when worker_self_reference_url is not set.

    The original try/except passed silently when no RuntimeError was
    raised; assertRaises guarantees the failure actually happens.
    """
    self.config(image_cache_driver='centralized_db')
    expected_message = "'worker_self_reference_url' needs to be set " \
                       "if `centralized_db` is defined as cache " \
                       "driver for image_cache_driver config option."
    with self.assertRaises(RuntimeError) as ctx:
        self.start_server(enable_cache=True, set_worker_url=False)
    self.assertIn(expected_message, ctx.exception.args)
|
||||
|
||||
def test_centralized_db_verify_worker_node_is_set(self):
    """Server startup must record this node's reference in the DB."""
    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    node = self.driver.db_api.node_reference_get_by_url(
        self.driver.context, 'http://workerx')
    self.assertEqual('http://workerx', node.node_reference_url)
|
||||
|
||||
def test_get_cache_size(self):
    """Cache size starts at zero and grows by the cached image's size."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Nothing cached yet.
    self.assertEqual(0, self.driver.get_cache_size())

    # Cache one image and wait for it to land on disk.
    self.api_put('/v2/cache/%s' % images['public'])
    self.wait_for_caching(images['public'])

    # The cache now holds exactly len(DATA) (i.e. 9) bytes.
    self.assertEqual(len(DATA), self.driver.get_cache_size())
|
||||
|
||||
def test_get_hit_count(self):
    """Hit count increments once per download served from the cache."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    image_id = images['public']

    # Nothing cached yet, so no hits.
    self.assertEqual(0, self.driver.get_hit_count(image_id))

    # Cache the image and confirm it landed.
    self.api_put('/v2/cache/%s' % image_id)
    self.wait_for_caching(image_id)
    self.assertTrue(self.driver.is_cached(image_id))

    # Cached but never downloaded from cache: still zero hits.
    self.assertEqual(0, self.driver.get_hit_count(image_id))

    # Every download served from the cache bumps the counter by one.
    for expected_hits in (1, 2):
        response = self.api_get('/v2/images/%s/file' % image_id)
        self.assertEqual('IMAGEDATA', response.text)
        self.assertEqual(expected_hits,
                         self.driver.get_hit_count(image_id))
|
||||
|
||||
def test_get_cached_images(self):
    """get_cached_images lists entries for the current node only."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Empty cache to start with.
    self.assertEqual(0, len(self.driver.get_cached_images()))

    # Cache one image and confirm it landed.
    self.api_put('/v2/cache/%s' % images['public'])
    self.wait_for_caching(images['public'])
    self.assertTrue(self.driver.is_cached(images['public']))

    # Exactly one cached image is now reported.
    self.assertEqual(1, len(self.driver.get_cached_images()))

    # An unknown worker node must see an empty cache listing.
    self.config(worker_self_reference_url="http://fake-worker")
    self.assertEqual(0, len(self.driver.get_cached_images()))
|
||||
|
||||
def test_is_cacheable(self):
    """An image is cacheable until it has actually been cached."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Not cached yet, so it is cacheable.
    self.assertTrue(self.driver.is_cacheable(images['public']))

    # Cache it.
    self.api_put('/v2/cache/%s' % images['public'])
    self.wait_for_caching(images['public'])
    self.assertTrue(self.driver.is_cached(images['public']))

    # Once cached, the same image is no longer cacheable.
    self.assertFalse(self.driver.is_cacheable(images['public']))
|
||||
|
||||
def test_is_being_cached(self):
    """is_being_cached is False when no caching is in progress."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # No caching has been requested, so nothing is in progress.
    self.assertFalse(self.driver.is_being_cached(images['public']))
|
||||
|
||||
def test_is_queued(self):
    """is_queued flips to True once caching has been requested."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Nothing requested yet.
    self.assertFalse(self.driver.is_queued(images['public']))

    # Request caching; the image should now be queued.
    self.api_put('/v2/cache/%s' % images['public'])
    self.assertTrue(self.driver.is_queued(images['public']))
|
||||
|
||||
def test_delete_cached_image(self):
    """delete_cached_image removes the file and the DB record."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Deleting a non-existent cache entry must not fail.
    self.driver.delete_cached_image('fake-image-id')

    # Cache one image and confirm it landed.
    self.api_put('/v2/cache/%s' % images['public'])
    self.wait_for_caching(images['public'])
    self.assertTrue(self.driver.is_cached(images['public']))
    self.assertEqual(1, len(self.driver.get_cached_images()))

    # Remove it again.
    self.driver.delete_cached_image(images['public'])

    # Both the file and the DB record are gone.
    self.assertFalse(self.driver.is_cached(images['public']))
    self.assertEqual(0, len(self.driver.get_cached_images()))
|
||||
|
||||
def test_delete_all_cached_images(self):
    """delete_all_cached_images empties the cache for this node."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Nothing cached yet.
    self.assertEqual(0, len(self.driver.get_cached_images()))

    # Deleting from an empty cache must not fail.
    self.driver.delete_all_cached_images()

    # Cache two images, checking the count after each.
    for count, key in enumerate(('public', 'private'), start=1):
        self.api_put('/v2/cache/%s' % images[key])
        self.wait_for_caching(images[key])
        self.assertTrue(self.driver.is_cached(images[key]))
        self.assertEqual(count, len(self.driver.get_cached_images()))

    # Wipe the cache.
    self.driver.delete_all_cached_images()

    # All entries are gone.
    self.assertEqual(0, len(self.driver.get_cached_images()))
|
||||
|
||||
def test_delete_queued_image(self):
    """delete_queued_image removes the queue marker for one image."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Deleting a non-existent queue entry must not fail.
    self.driver.delete_queued_image('fake-image-id')

    # Queue an image for caching.
    self.api_put('/v2/cache/%s' % images['public'])
    self.assertTrue(self.driver.is_queued(images['public']))
    self.assertEqual(1, len(self.driver.get_queued_images()))

    # Remove it from the queue.
    self.driver.delete_queued_image(images['public'])

    # The queue entry is gone.
    self.assertFalse(self.driver.is_queued(images['public']))
    self.assertEqual(0, len(self.driver.get_queued_images()))
|
||||
|
||||
def test_delete_all_queued_images(self):
    """delete_all_queued_images removes every queue marker."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Nothing queued yet.
    self.assertEqual(0, len(self.driver.get_queued_images()))

    # Deleting from an empty queue must not fail.
    self.driver.delete_all_queued_images()

    # Queue two images, checking the count after each.
    for count, key in enumerate(('public', 'private'), start=1):
        self.api_put('/v2/cache/%s' % images[key])
        self.assertTrue(self.driver.is_queued(images[key]))
        self.assertEqual(count, len(self.driver.get_queued_images()))

    # Empty the queue.
    self.driver.delete_all_queued_images()

    # All queue entries are gone.
    self.assertEqual(0, len(self.driver.get_queued_images()))
|
||||
|
||||
def test_clean(self):
    """clean() removes stalled files from the incomplete directory.

    Fixed: the fixture file is now written via a context manager so the
    handle is closed even if an assertion fails mid-test.
    """
    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    self.driver.configure()
    cache_dir = os.path.join(self.test_dir, 'cache')
    incomplete_file_path = os.path.join(cache_dir, 'incomplete', '1')
    with open(incomplete_file_path, 'wb') as incomplete_file:
        incomplete_file.write(DATA)

    self.assertTrue(os.path.exists(incomplete_file_path))

    self.delay_inaccurate_clock()
    self.driver.clean(stall_time=0)

    self.assertFalse(os.path.exists(incomplete_file_path))
|
||||
|
||||
def _test_clean_stall_time(
        self, stall_time=None, days=2, stall_failed=False):
    """Test the clean method removes the stalled images as expected.

    :param stall_time: value forwarded to Driver.clean(); None means the
        default `image_cache_stall_time` (24 hours) applies
    :param days: how far in the past to backdate the first file so it
        counts as stalled
    :param stall_failed: simulate a delete failure and verify that
        clean() ignores it

    Fixed: fixture files are now written via a context manager so the
    handles are closed even if an assertion fails mid-test.
    """
    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    self.driver.configure()

    cache_dir = os.path.join(self.test_dir, 'cache')
    incomplete_file_path_1 = os.path.join(cache_dir,
                                          'incomplete', '1')
    incomplete_file_path_2 = os.path.join(cache_dir,
                                          'incomplete', '2')

    for f in (incomplete_file_path_1, incomplete_file_path_2):
        with open(f, 'wb') as incomplete_file:
            incomplete_file.write(DATA)

    # Backdate the first file so it qualifies as stalled.
    mtime = os.path.getmtime(incomplete_file_path_1)
    pastday = (datetime.datetime.fromtimestamp(mtime) -
               datetime.timedelta(days=days))
    atime = int(time.mktime(pastday.timetuple()))
    mtime = atime
    os.utime(incomplete_file_path_1, (atime, mtime))

    self.assertTrue(os.path.exists(incomplete_file_path_1))
    self.assertTrue(os.path.exists(incomplete_file_path_2))

    # If stall_time is None then it will wait for default time
    # of `image_cache_stall_time` which is 24 hours
    if stall_failed:
        with mock.patch.object(
                fileutils, 'delete_if_exists') as mock_delete:
            mock_delete.side_effect = OSError(errno.ENOENT, '')
            self.driver.clean(stall_time=stall_time)
            self.assertTrue(os.path.exists(incomplete_file_path_1))
    else:
        self.driver.clean(stall_time=stall_time)
        self.assertFalse(os.path.exists(incomplete_file_path_1))

    # The fresh file must survive either way.
    self.assertTrue(os.path.exists(incomplete_file_path_2))
|
||||
|
||||
def test_clean_stalled_none_stall_time(self):
    """Default stall time (image_cache_stall_time) removes a 2-day-old file."""
    self._test_clean_stall_time(stall_time=None)
|
||||
|
||||
def test_clean_stalled_nonzero_stall_time(self):
    """Test the clean method removes expected images."""
    self._test_clean_stall_time(stall_time=3600, days=1)
|
||||
|
||||
def test_clean_stalled_fails(self):
    """Test the clean method fails to delete file, ignores the failure"""
    self._test_clean_stall_time(
        stall_time=3600, days=1, stall_failed=True)
|
||||
|
||||
def test_least_recently_accessed(self):
    """The first image cached is reported as least recently accessed."""
    self.start_server(enable_cache=True)
    images = self.load_data()
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # Nothing cached yet.
    self.assertEqual(0, len(self.driver.get_cached_images()))

    # Deleting from an empty cache must not fail.
    self.driver.delete_all_cached_images()

    # Cache two images, 'public' first, checking the count after each.
    for count, key in enumerate(('public', 'private'), start=1):
        self.api_put('/v2/cache/%s' % images[key])
        self.wait_for_caching(images[key])
        self.assertTrue(self.driver.is_cached(images[key]))
        self.assertEqual(count, len(self.driver.get_cached_images()))

    # The first image cached ('public') is the least recently accessed.
    image_id, size = self.driver.get_least_recently_accessed()
    self.assertEqual(images['public'], image_id)
    self.assertEqual(len(DATA), size)
|
||||
|
||||
def test_open_for_write_good(self):
    """
    Test to see if open_for_write works in normal case
    """
    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    self.driver.configure()

    # test a good case
    image_id = '1'
    self.assertFalse(self.driver.is_cached(image_id))
    with self.driver.open_for_write(image_id) as cache_file:
        cache_file.write(b'a')
    # leaving the context without error should commit the image
    self.assertTrue(self.driver.is_cached(image_id),
                    "Image %s was NOT cached!" % image_id)

    # make sure it has tidied up
    cache_dir = os.path.join(self.test_dir, 'cache')
    incomplete_file_path = os.path.join(cache_dir,
                                        'incomplete', image_id)
    cache_file_path = os.path.join(cache_dir, image_id)
    invalid_file_path = os.path.join(cache_dir, 'invalid', image_id)
    # on success the in-progress file must be promoted out of
    # incomplete/ into the cache root, and never land in invalid/
    self.assertFalse(os.path.exists(incomplete_file_path))
    self.assertFalse(os.path.exists(invalid_file_path))
    self.assertTrue(os.path.exists(cache_file_path))
|
||||
|
||||
def test_open_for_write_with_exception(self):
    """
    Test to see if open_for_write works in a failure case for each driver

    This case is where an exception is raised while the file is being
    written. The image is partially filled in cache and filling won't
    resume so verify the image is moved to invalid/ directory
    """
    # test a case where an exception is raised while the file is open

    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    self.driver.configure()

    image_id = '1'
    self.assertFalse(self.driver.is_cached(image_id))
    try:
        with self.driver.open_for_write(image_id):
            # simulate a failure mid-write; the context manager's
            # __exit__ is responsible for the cleanup verified below
            raise IOError
    except Exception as e:
        self.assertIsInstance(e, IOError)
    self.assertFalse(self.driver.is_cached(image_id),
                     "Image %s was cached!" % image_id)
    # make sure it has tidied up
    cache_dir = os.path.join(self.test_dir, 'cache')
    incomplete_file_path = os.path.join(cache_dir,
                                        'incomplete', image_id)
    invalid_file_path = os.path.join(cache_dir, 'invalid', image_id)
    # the partial file must be moved from incomplete/ to invalid/
    self.assertFalse(os.path.exists(incomplete_file_path))
    self.assertTrue(os.path.exists(invalid_file_path))
|
||||
|
||||
def test_open_for_read_good(self):
    """Reading a cached image returns its data and bumps the hit count."""
    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    self.driver.configure()
    images = self.load_data()

    self.assertFalse(self.driver.is_cached(images['public']))
    # Cache one image
    path = '/v2/cache/%s' % images['public']
    self.api_put(path)
    self.wait_for_caching(images['public'])
    # verify image is cached
    self.assertTrue(self.driver.is_cached(images['public']))
    # verify cache hit count for above image is 0
    self.assertEqual(0, self.driver.get_hit_count(images['public']))
    # Open image for read
    buff = io.BytesIO()
    with self.driver.open_for_read(images['public']) as cache_file:
        for chunk in cache_file:
            buff.write(chunk)

    # the streamed bytes must match the original payload exactly
    self.assertEqual(DATA, buff.getvalue())
    # verify now cache hit count for above image is 1
    self.assertEqual(1, self.driver.get_hit_count(images['public']))
|
||||
|
||||
def test_open_for_read_with_exception(self):
    """Hit count is incremented even when the reader raises mid-stream."""
    self.start_server(enable_cache=True)
    self.driver = centralized_db.Driver()
    self.driver.configure()
    images = self.load_data()

    self.assertFalse(self.driver.is_cached(images['public']))
    # Cache one image
    path = '/v2/cache/%s' % images['public']
    self.api_put(path)
    self.wait_for_caching(images['public'])
    # verify image is cached
    self.assertTrue(self.driver.is_cached(images['public']))
    # verify cache hit count for above image is 0
    self.assertEqual(0, self.driver.get_hit_count(images['public']))
    # Open image for read
    buff = io.BytesIO()
    try:
        with self.driver.open_for_read(images['public']):
            # fail before reading anything from the cache file
            raise IOError
    except Exception as e:
        self.assertIsInstance(e, IOError)

    # nothing was read before the exception, so the buffer is empty
    self.assertEqual(b'', buff.getvalue())
    # verify the cache hit count is still incremented to 1 even though
    # an exception was raised during the read
    self.assertEqual(1, self.driver.get_hit_count(images['public']))
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
from contextlib import contextmanager
|
||||
import datetime
|
||||
import errno
|
||||
import io
|
||||
import os
|
||||
import tempfile
|
||||
|
@ -24,6 +25,7 @@ from unittest import mock
|
|||
import fixtures
|
||||
import glance_store as store
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import fileutils
|
||||
from oslo_utils import secretutils
|
||||
from oslo_utils import units
|
||||
|
||||
|
@ -129,6 +131,32 @@ class ImageCacheTestCase(object):
|
|||
for image_id in (1, 2):
|
||||
self.assertFalse(self.cache.is_cached(image_id))
|
||||
|
||||
def _test_clean_invalid_path(self, failure=False):
    """Exercise clean() against a file under the cache's invalid/ dir.

    :param failure: when True, force the underlying delete to raise
        OSError and verify the invalid file is left in place; when
        False, verify clean() removes the file.
    """
    invalid_file_path = os.path.join(self.cache_dir, 'invalid', '1')
    # Use a context manager so the handle is closed even if write()
    # raises (the original bare open/close could leak the handle).
    with open(invalid_file_path, 'wb') as invalid_file:
        invalid_file.write(FIXTURE_DATA)

    self.assertTrue(os.path.exists(invalid_file_path))

    # guard against coarse filesystem timestamp granularity
    self.delay_inaccurate_clock()
    if failure:
        with mock.patch.object(
                fileutils, 'delete_if_exists') as mock_delete:
            mock_delete.side_effect = OSError(errno.ENOENT, '')
            try:
                self.cache.clean()
            except OSError:
                # deletion failed, so the file must still exist
                self.assertTrue(os.path.exists(invalid_file_path))
    else:
        self.cache.clean()
        self.assertFalse(os.path.exists(invalid_file_path))
|
||||
|
||||
@skip_if_disabled
def test_clean_invalid_path(self):
    """Test the clean method removes expected image from invalid path."""
    # Default (failure=False) path: clean() should delete the file.
    self._test_clean_invalid_path()
|
||||
|
||||
@skip_if_disabled
|
||||
def test_clean_stalled(self):
|
||||
"""Test the clean method removes expected images."""
|
||||
|
@ -144,8 +172,8 @@ class ImageCacheTestCase(object):
|
|||
|
||||
self.assertFalse(os.path.exists(incomplete_file_path))
|
||||
|
||||
@skip_if_disabled
|
||||
def test_clean_stalled_nonzero_stall_time(self):
|
||||
def _test_clean_stall_time(self, stall_time=None, days=2,
|
||||
stall_failed=False):
|
||||
"""
|
||||
Test the clean method removes the stalled images as expected
|
||||
"""
|
||||
|
@ -160,7 +188,7 @@ class ImageCacheTestCase(object):
|
|||
|
||||
mtime = os.path.getmtime(incomplete_file_path_1)
|
||||
pastday = (datetime.datetime.fromtimestamp(mtime) -
|
||||
datetime.timedelta(days=1))
|
||||
datetime.timedelta(days=days))
|
||||
atime = int(time.mktime(pastday.timetuple()))
|
||||
mtime = atime
|
||||
os.utime(incomplete_file_path_1, (atime, mtime))
|
||||
|
@ -168,11 +196,29 @@ class ImageCacheTestCase(object):
|
|||
self.assertTrue(os.path.exists(incomplete_file_path_1))
|
||||
self.assertTrue(os.path.exists(incomplete_file_path_2))
|
||||
|
||||
self.cache.clean(stall_time=3600)
|
||||
# If stall_time is None then it will wait for default time
|
||||
# of `image_cache_stall_time` which is 24 hours
|
||||
if stall_failed:
|
||||
with mock.patch.object(
|
||||
fileutils, 'delete_if_exists') as mock_delete:
|
||||
mock_delete.side_effect = OSError(errno.ENOENT, '')
|
||||
self.cache.clean(stall_time=stall_time)
|
||||
self.assertTrue(os.path.exists(incomplete_file_path_1))
|
||||
else:
|
||||
self.cache.clean(stall_time=stall_time)
|
||||
self.assertFalse(os.path.exists(incomplete_file_path_1))
|
||||
|
||||
self.assertFalse(os.path.exists(incomplete_file_path_1))
|
||||
self.assertTrue(os.path.exists(incomplete_file_path_2))
|
||||
|
||||
@skip_if_disabled
def test_clean_stalled_none_stall_time(self):
    """Clean with stall_time=None falls back to the configured default."""
    self._test_clean_stall_time()
|
||||
|
||||
@skip_if_disabled
def test_clean_stalled_nonzero_stall_time(self):
    """Test the clean method removes expected images."""
    # days=1 ages the incomplete file past the 3600 second stall_time.
    self._test_clean_stall_time(stall_time=3600, days=1)
|
||||
|
||||
@skip_if_disabled
|
||||
def test_prune(self):
|
||||
"""
|
||||
|
@ -280,6 +326,19 @@ class ImageCacheTestCase(object):
|
|||
|
||||
self.cache.delete_cached_image(1)
|
||||
|
||||
# Test that we return false if image is being cached
|
||||
incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', '1')
|
||||
incomplete_file = open(incomplete_file_path, 'wb')
|
||||
incomplete_file.write(FIXTURE_DATA)
|
||||
incomplete_file.close()
|
||||
|
||||
self.assertFalse(self.cache.is_queued(1))
|
||||
self.assertFalse(self.cache.is_cached(1))
|
||||
self.assertTrue(self.cache.driver.is_being_cached(1))
|
||||
|
||||
self.assertFalse(self.cache.queue_image(1))
|
||||
self.cache.clean(stall_time=0)
|
||||
|
||||
for x in range(3):
|
||||
self.assertTrue(self.cache.queue_image(x))
|
||||
|
||||
|
@ -421,7 +480,11 @@ class ImageCacheTestCase(object):
|
|||
md5.update(image)
|
||||
checksum = md5.hexdigest()
|
||||
|
||||
cache = image_cache.ImageCache()
|
||||
with mock.patch('glance.db.get_api') as mock_get_db:
|
||||
db = unit_test_utils.FakeDB(initialize=False)
|
||||
mock_get_db.return_value = db
|
||||
cache = image_cache.ImageCache()
|
||||
|
||||
img_iter = cache.get_caching_iter(image_id, checksum, [image])
|
||||
for chunk in img_iter:
|
||||
pass
|
||||
|
@ -434,7 +497,11 @@ class ImageCacheTestCase(object):
|
|||
image_id = 123
|
||||
checksum = "foobar" # bad.
|
||||
|
||||
cache = image_cache.ImageCache()
|
||||
with mock.patch('glance.db.get_api') as mock_get_db:
|
||||
db = unit_test_utils.FakeDB(initialize=False)
|
||||
mock_get_db.return_value = db
|
||||
cache = image_cache.ImageCache()
|
||||
|
||||
img_iter = cache.get_caching_iter(image_id, checksum, [image])
|
||||
|
||||
def reader():
|
||||
|
@ -486,6 +553,68 @@ class TestImageCacheXattr(test_utils.BaseTestCase,
|
|||
return
|
||||
|
||||
|
||||
class TestImageCacheCentralizedDb(test_utils.BaseTestCase,
                                  ImageCacheTestCase):

    """Tests image caching when Centralized DB is used in cache"""

    def setUp(self):
        """Configure the cache with the centralized_db driver backed
        by an in-memory FakeDB instead of a real database."""
        super(TestImageCacheCentralizedDb, self).setUp()

        self.inited = True
        self.disabled = False
        self.cache_dir = self.useFixture(fixtures.TempDir()).path
        self.config(image_cache_dir=self.cache_dir,
                    image_cache_driver='centralized_db',
                    image_cache_max_size=5 * units.Ki,
                    worker_self_reference_url='http://workerx')

        # Patch the DB API factory for the duration of ImageCache()
        # construction so the driver binds to the FakeDB.
        with mock.patch('glance.db.get_api') as mock_get_db:
            self.db = unit_test_utils.FakeDB(initialize=False)
            mock_get_db.return_value = self.db
            self.cache = image_cache.ImageCache()

    def test_node_reference_create_duplicate(self):
        """A duplicate node reference is ignored and logged at debug."""
        with mock.patch('glance.db.get_api') as mock_get_db:
            self.db = unit_test_utils.FakeDB(initialize=False)
            mock_get_db.return_value = self.db
            with mock.patch.object(
                    self.db, 'node_reference_create') as mock_node_create:
                # simulate the worker's node reference already existing
                mock_node_create.side_effect = exception.Duplicate
                with mock.patch.object(
                        image_cache.drivers.centralized_db, 'LOG') as mock_log:
                    image_cache.ImageCache()
                    expected_calls = [
                        mock.call('Node reference is already recorded, '
                                  'ignoring it')
                    ]
                    mock_log.debug.assert_has_calls(expected_calls)

    def test_get_least_recently_accessed_os_error(self):
        """If os.stat fails, the reported size falls back to 0."""
        self.assertEqual(0, self.cache.get_cache_size())
        for x in range(10):
            FIXTURE_FILE = io.BytesIO(FIXTURE_DATA)
            self.assertTrue(self.cache.cache_image_file(x, FIXTURE_FILE))

        # NOTE(review): assumes FIXTURE_DATA is 1 KiB so ten copies
        # total 10 KiB — confirm against the fixture definition.
        self.assertEqual(10 * units.Ki, self.cache.get_cache_size())

        with mock.patch.object(os, 'stat') as mock_stat:
            mock_stat.side_effect = OSError
            image_id, size = self.cache.driver.get_least_recently_accessed()
            self.assertEqual(0, size)

    @skip_if_disabled
    def test_clean_stalled_fails(self):
        """Test the clean method fails to delete file, ignores the failure"""
        self._test_clean_stall_time(stall_time=3600, days=1,
                                    stall_failed=True)

    @skip_if_disabled
    def test_clean_invalid_path_fails(self):
        """Test the clean method fails to remove image from invalid path."""
        self._test_clean_invalid_path(failure=True)
|
||||
|
||||
|
||||
class TestImageCacheSqlite(test_utils.BaseTestCase,
|
||||
ImageCacheTestCase):
|
||||
|
||||
|
|
Loading…
Reference in New Issue