Merge "cloud: Remove old cloud-layer caching functionality"

Zuul 2023-09-13 14:24:32 +00:00 committed by Gerrit Code Review
commit 0782176022
16 changed files with 9 additions and 973 deletions

View File

@@ -20,19 +20,10 @@ from openstack import exceptions
from openstack import warnings as os_warnings
def _no_pending_volumes(volumes):
"""If there are any volumes not in a steady state, don't cache"""
for volume in volumes:
if volume['status'] not in ('available', 'error', 'in-use'):
return False
return True
class BlockStorageCloudMixin:
block_storage: Proxy
# TODO(stephenfin): Remove 'cache' in a future major version
@_utils.cache_on_arguments(should_cache_fn=_no_pending_volumes)
def list_volumes(self, cache=True):
"""List all available volumes.
@@ -47,7 +38,6 @@ class BlockStorageCloudMixin:
return list(self.block_storage.volumes())
# TODO(stephenfin): Remove 'get_extra' in a future major version
@_utils.cache_on_arguments()
def list_volume_types(self, get_extra=None):
"""List all available volume types.
@@ -166,8 +156,6 @@ class BlockStorageCloudMixin:
volume = self.block_storage.create_volume(**kwargs)
self.list_volumes.invalidate(self)
if volume['status'] == 'error':
raise exc.OpenStackCloudException("Error in creating volume")
@@ -195,8 +183,6 @@ class BlockStorageCloudMixin:
volume = self.block_storage.update_volume(volume, **kwargs)
self.list_volumes.invalidate(self)
return volume
def set_volume_bootable(self, name_or_id, bootable=True):
@@ -240,8 +226,6 @@ class BlockStorageCloudMixin:
:raises: OpenStackCloudTimeout if wait time exceeded.
:raises: OpenStackCloudException on operation error.
"""
self.list_volumes.invalidate(self)
volume = self.block_storage.find_volume(name_or_id)
if not volume:
@@ -257,7 +241,6 @@ class BlockStorageCloudMixin:
self.log.exception("error in deleting volume")
raise
self.list_volumes.invalidate(self)
if wait:
self.block_storage.wait_for_delete(volume, wait=timeout)

View File

@@ -15,7 +15,6 @@ from openstack.cloud import exc
class CoeCloudMixin:
@_utils.cache_on_arguments()
def list_coe_clusters(self):
"""List COE (Container Orchestration Engine) cluster.
@@ -90,7 +89,6 @@ class CoeCloudMixin:
**kwargs,
)
self.list_coe_clusters.invalidate(self)
return cluster
def delete_coe_cluster(self, name_or_id):
@@ -114,7 +112,6 @@ class CoeCloudMixin:
return False
self.container_infrastructure_management.delete_cluster(cluster)
self.list_coe_clusters.invalidate(self)
return True
def update_coe_cluster(self, name_or_id, **kwargs):
@@ -127,7 +124,6 @@ class CoeCloudMixin:
:raises: OpenStackCloudException on operation error.
"""
self.list_coe_clusters.invalidate(self)
cluster = self.get_coe_cluster(name_or_id)
if not cluster:
raise exc.OpenStackCloudException(
@@ -169,7 +165,6 @@ class CoeCloudMixin:
cluster_uuid=cluster_id, csr=csr
)
@_utils.cache_on_arguments()
def list_cluster_templates(self, detail=False):
"""List cluster templates.

View File

@@ -115,7 +115,6 @@ class ComputeCloudMixin:
)
)
@_utils.cache_on_arguments()
def _nova_extensions(self):
extensions = set([e.alias for e in self.compute.extensions()])
return extensions
@@ -194,7 +193,6 @@ class ComputeCloudMixin:
filters = {}
return list(self.compute.keypairs(**filters))
@_utils.cache_on_arguments()
def list_availability_zone_names(self, unavailable=False):
"""List names of availability zones.
@@ -216,7 +214,6 @@ class ComputeCloudMixin:
)
return []
@_utils.cache_on_arguments()
def list_flavors(self, get_extra=False):
"""List all available flavors.
@@ -1093,8 +1090,6 @@ class ComputeCloudMixin:
'source_type': 'volume',
}
kwargs['block_device_mapping_v2'].append(block_mapping)
if boot_volume or boot_from_volume or volumes:
self.list_volumes.invalidate(self)
return kwargs
def wait_for_server(
@@ -1379,18 +1374,6 @@ class ComputeCloudMixin:
if not wait:
return True
# If the server has volume attachments, or if it has booted
# from volume, deleting it will change volume state so we will
# need to invalidate the cache. Avoid the extra API call if
# caching is not enabled.
reset_volume_cache = False
if (
self.cache_enabled
and self.has_service('volume')
and self.get_volumes(server)
):
reset_volume_cache = True
if not isinstance(server, _server.Server):
# We might come here with Munch object (at the moment).
# If this is the case - convert it into real server to be able to
@@ -1398,9 +1381,6 @@ class ComputeCloudMixin:
server = _server.Server(id=server['id'])
self.compute.wait_for_delete(server, wait=timeout)
if reset_volume_cache:
self.list_volumes.invalidate(self)
return True
@_utils.valid_kwargs('name', 'description')

View File

@@ -61,7 +61,6 @@ class IdentityCloudMixin:
ret.update(self._get_project_id_param_dict(project))
return ret
@_utils.cache_on_arguments()
def list_projects(self, domain_id=None, name_or_id=None, filters=None):
"""List projects.
@@ -186,7 +185,6 @@ class IdentityCloudMixin:
if enabled is not None:
kwargs.update({'enabled': enabled})
project = self.identity.update_project(project, **kwargs)
self.list_projects.invalidate(self)
return project
def create_project(
@@ -242,7 +240,6 @@ class IdentityCloudMixin:
return False
@_utils.valid_kwargs('domain_id', 'name')
@_utils.cache_on_arguments()
def list_users(self, **kwargs):
"""List users.
@@ -340,7 +337,6 @@ class IdentityCloudMixin:
'default_project',
)
def update_user(self, name_or_id, **kwargs):
self.list_users.invalidate(self)
user_kwargs = {}
if 'domain_id' in kwargs and kwargs['domain_id']:
user_kwargs['domain_id'] = kwargs['domain_id']
@@ -355,7 +351,6 @@ class IdentityCloudMixin:
del kwargs['domain_id']
user = self.identity.update_user(user, **kwargs)
self.list_users.invalidate(self)
return user
def create_user(
@@ -378,13 +373,10 @@ class IdentityCloudMixin:
user = self.identity.create_user(**params)
self.list_users.invalidate(self)
return user
@_utils.valid_kwargs('domain_id')
def delete_user(self, name_or_id, **kwargs):
# TODO(mordred) Why are we invalidating at the TOP?
self.list_users.invalidate(self)
try:
user = self.get_user(name_or_id, **kwargs)
if not user:
@@ -394,7 +386,6 @@ class IdentityCloudMixin:
return False
self.identity.delete_user(user)
self.list_users.invalidate(self)
return True
except exceptions.SDKException:
@@ -891,7 +882,6 @@ class IdentityCloudMixin:
return self.identity.get_domain(domain_id)
@_utils.valid_kwargs('domain_id')
@_utils.cache_on_arguments()
def list_groups(self, **kwargs):
"""List Keystone groups.
@@ -969,7 +959,6 @@ class IdentityCloudMixin:
group = self.identity.create_group(**group_ref)
self.list_groups.invalidate(self)
return group
def update_group(
@@ -988,7 +977,6 @@ class IdentityCloudMixin:
:raises: ``OpenStackCloudException``: if something goes wrong during
the OpenStack API call.
"""
self.list_groups.invalidate(self)
group = self.identity.find_group(name_or_id, **kwargs)
if group is None:
raise exc.OpenStackCloudException(
@@ -1003,7 +991,6 @@ class IdentityCloudMixin:
group = self.identity.update_group(group, **group_ref)
self.list_groups.invalidate(self)
return group
def delete_group(self, name_or_id):
@@ -1022,7 +1009,6 @@ class IdentityCloudMixin:
self.identity.delete_group(group)
self.list_groups.invalidate(self)
return True
except exceptions.SDKException:

View File

@@ -16,14 +16,6 @@ from openstack.image.v2._proxy import Proxy
from openstack import utils
def _no_pending_images(images):
"""If there are any images not in a steady state, don't cache"""
for image in images:
if image.status not in ('active', 'deleted', 'killed'):
return False
return True
class ImageCloudMixin:
image: Proxy
@@ -34,7 +26,6 @@ class ImageCloudMixin:
images = self.list_images()
return _utils._filter_list(images, name_or_id, filters)
@_utils.cache_on_arguments(should_cache_fn=_no_pending_images)
def list_images(self, filter_deleted=True, show_all=False):
"""Get available images.
@@ -170,7 +161,6 @@ class ImageCloudMixin:
for count in utils.iterate_timeout(
timeout, "Timeout waiting for image to snapshot"
):
self.list_images.invalidate(self)
image = self.get_image(image_id)
if not image:
continue
@@ -203,7 +193,6 @@ class ImageCloudMixin:
if not image:
return False
self.image.delete_image(image)
self.list_images.invalidate(self)
# Task API means an image was uploaded to swift
# TODO(gtema) does it make sense to move this into proxy?
@@ -221,7 +210,6 @@ class ImageCloudMixin:
for count in utils.iterate_timeout(
timeout, "Timeout waiting for the image to be deleted."
):
self._get_cache(None).invalidate()
if self.get_image(image.id) is None:
break
return True
@@ -321,9 +309,9 @@ class ImageCloudMixin:
**kwargs,
)
self._get_cache(None).invalidate()
if not wait:
return image
try:
for count in utils.iterate_timeout(
timeout, "Timeout waiting for the image to finish."

View File

@@ -19,7 +19,6 @@ from openstack.network.v2._proxy import Proxy
class NetworkCloudMixin:
network: Proxy
@_utils.cache_on_arguments()
def _neutron_extensions(self):
extensions = set()
for extension in self.network.extensions():

View File

@@ -171,7 +171,6 @@ class ObjectStoreCloudMixin:
"Could not determine container access for ACL: %s." % acl
)
@_utils.cache_on_arguments()
def get_object_capabilities(self):
"""Get infomation about the object-storage service

View File

@@ -16,15 +16,6 @@ from openstack.orchestration.util import event_utils
from openstack.orchestration.v1._proxy import Proxy
def _no_pending_stacks(stacks):
"""If there are any stacks not in a steady state, don't cache"""
for stack in stacks:
status = stack['stack_status']
if '_COMPLETE' not in status and '_FAILED' not in status:
return False
return True
class OrchestrationCloudMixin:
orchestration: Proxy
@@ -226,7 +217,6 @@ class OrchestrationCloudMixin:
stacks = self.list_stacks()
return _utils._filter_list(stacks, name_or_id, filters)
@_utils.cache_on_arguments(should_cache_fn=_no_pending_stacks)
def list_stacks(self, **query):
"""List all stacks.

View File

@@ -14,7 +14,6 @@
import contextlib
import fnmatch
import functools
import inspect
import re
import uuid
@@ -27,9 +26,6 @@ from openstack import _log
from openstack.cloud import exc
_decorated_methods = []
def _dictify_resource(resource):
if isinstance(resource, list):
return [_dictify_resource(r) for r in resource]
@@ -230,45 +226,6 @@ def valid_kwargs(*valid_args):
return func_wrapper
def _func_wrap(f):
# NOTE(morgan): This extra wrapper is intended to eliminate ever
# passing a bound method to dogpile.cache's cache_on_arguments. In
# 0.7.0 and later it is impossible to pass bound methods to the
# decorator. This was introduced when utilizing the decorate module in
# lieu of a direct wrap implementation.
@functools.wraps(f)
def inner(*args, **kwargs):
return f(*args, **kwargs)
return inner
def cache_on_arguments(*cache_on_args, **cache_on_kwargs):
_cache_name = cache_on_kwargs.pop('resource', None)
def _inner_cache_on_arguments(func):
def _cache_decorator(obj, *args, **kwargs):
the_method = obj._get_cache(_cache_name).cache_on_arguments(
*cache_on_args, **cache_on_kwargs
)(_func_wrap(func.__get__(obj, type(obj))))
return the_method(*args, **kwargs)
def invalidate(obj, *args, **kwargs):
return (
obj._get_cache(_cache_name)
.cache_on_arguments()(func)
.invalidate(*args, **kwargs)
)
_cache_decorator.invalidate = invalidate
_cache_decorator.func = func
_decorated_methods.append(func.__name__)
return _cache_decorator
return _inner_cache_on_arguments
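For context, the removed decorator above was a thin shim over dogpile.cache. A minimal sketch of the underlying dogpile.cache pattern it delegated to, including a should_cache_fn predicate in the spirit of the removed _no_pending_volumes helper (the in-memory backend and the stub API call are illustrative assumptions, not the SDK's actual wiring):

from dogpile.cache import make_region

region = make_region().configure(
    'dogpile.cache.memory',
    expiration_time=60,
)

def _steady_state(volumes):
    # Only cache once every volume has settled, mirroring the
    # removed _no_pending_volumes predicate.
    return all(
        v['status'] in ('available', 'error', 'in-use') for v in volumes
    )

@region.cache_on_arguments(should_cache_fn=_steady_state)
def list_volumes():
    # Stand-in for the real block storage API call.
    return [{'status': 'available'}]

list_volumes()             # first call populates the cache
list_volumes()             # served from the cache
list_volumes.invalidate()  # drop the entry, as the cloud layer used to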
@contextlib.contextmanager
def openstacksdk_exceptions(error_message=None):
"""Context manager for dealing with openstack exceptions.

View File

@@ -101,32 +101,6 @@ class _OpenStackCloudMixin:
else:
self.cache_enabled = False
# TODO(gtema): delete it with the standalone cloud layer caching
def _fake_invalidate(unused):
pass
class _FakeCache:
def invalidate(self):
pass
# Don't cache list_servers if we're not caching things.
# Replace this with a more specific cache configuration
# soon.
self._cache = _FakeCache()
# Undecorate cache decorated methods. Otherwise the call stacks
# wind up being stupidly long and hard to debug
for method in _utils._decorated_methods:
meth_obj = getattr(self, method, None)
if not meth_obj:
continue
if hasattr(meth_obj, 'invalidate') and hasattr(
meth_obj, 'func'
):
new_func = functools.partial(meth_obj.func, self)
new_func.invalidate = _fake_invalidate
setattr(self, method, new_func)
# Unconditionally create cache even with a "null" backend
self._cache = self._make_cache(
cache_class, cache_expiration_time, cache_arguments
@@ -323,12 +297,6 @@ class _OpenStackCloudMixin:
return generate_key
def _get_cache(self, resource_name):
if resource_name and resource_name in self._resource_caches:
return self._resource_caches[resource_name]
else:
return self._cache
def pprint(self, resource):
"""Wrapper around pprint that groks munch objects"""
# import late since this is a utility function
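As the retained comment above notes, a cache region is now created unconditionally, with a "null" backend standing in when caching is disabled. A minimal sketch of why that removes the need for the deleted _FakeCache special-casing, using dogpile.cache's stock null backend:

from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE

# The null backend satisfies the full region interface but stores
# nothing, so callers never have to ask "is caching enabled?".
region = make_region().configure('dogpile.cache.null')

region.set('servers', ['vm1'])            # silently discarded
assert region.get('servers') is NO_VALUE  # every lookup is a miss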

View File

@@ -962,7 +962,6 @@ class Proxy(proxy.Proxy):
server = self._get_resource(_server.Server, server)
image_id = server.create_image(self, name, metadata)
self._connection.list_images.invalidate(self)
image = self._connection.get_image(image_id)
if not wait:

View File

@@ -240,8 +240,6 @@ class Proxy(proxy.Proxy):
else:
image = self._create(_image.Image, name=name, **kwargs)
self._connection._get_cache(None).invalidate()
return image
def upload_image(self, **attrs):
@@ -441,7 +439,6 @@ class Proxy(proxy.Proxy):
if not img_props:
return False
self.put('/images/{id}'.format(id=image.id), headers=img_props)
self._connection.list_images.invalidate(self._connection)
return True
def update_image_properties(

View File

@@ -341,8 +341,6 @@ class Proxy(proxy.Proxy):
image_kwargs['name'] = name
image = self._create(_image.Image, **image_kwargs)
self._connection._get_cache(None).invalidate()
return image
def import_image(
@@ -734,7 +732,6 @@ class Proxy(proxy.Proxy):
}
glance_task = self.create_task(**task_args)
self._connection.list_images.invalidate(self)
if wait:
start = time.time()
@@ -771,7 +768,6 @@ class Proxy(proxy.Proxy):
# Clean up after ourselves. The object we created is not
# needed after the import is done.
self._connection.delete_object(container, name)
self._connection.list_images.invalidate(self)
return image
else:
return glance_task
@@ -964,8 +960,6 @@ class Proxy(proxy.Proxy):
self.update_image(image, **img_props)
self._connection.list_images.invalidate(self._connection)
return True
def add_tag(self, image, tag):

View File

@@ -208,6 +208,7 @@ class Proxy(adapter.Adapter):
self._report_stats(None, url, method, e)
raise
# TODO(stephenfin): service_type is unused and should be dropped
@functools.lru_cache(maxsize=256)
def _extract_name(self, url, service_type=None, project_id=None):
"""Produce a key name to use in logging/metrics from the URL path.

View File

@@ -1,807 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
import openstack
from openstack.block_storage.v3 import volume as _volume
import openstack.cloud
from openstack.cloud import meta
from openstack.compute.v2 import flavor as _flavor
from openstack import exceptions
from openstack.identity.v3 import project as _project
from openstack.identity.v3 import user as _user
from openstack.image.v2 import image as _image
from openstack.network.v2 import port as _port
from openstack.test import fakes as _fakes
from openstack.tests import fakes
from openstack.tests.unit import base
from openstack.tests.unit.cloud import test_port
# Mock out the gettext function so that the task schema can be copy-pasted
def _(msg):
return msg
_TASK_PROPERTIES = {
"id": {
"description": _("An identifier for the task"),
"pattern": _(
'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'
),
"type": "string",
},
"type": {
"description": _("The type of task represented by this content"),
"enum": [
"import",
],
"type": "string",
},
"status": {
"description": _("The current status of this task"),
"enum": ["pending", "processing", "success", "failure"],
"type": "string",
},
"input": {
"description": _("The parameters required by task, JSON blob"),
"type": ["null", "object"],
},
"result": {
"description": _("The result of current task, JSON blob"),
"type": ["null", "object"],
},
"owner": {
"description": _("An identifier for the owner of this task"),
"type": "string",
},
"message": {
"description": _(
"Human-readable informative message only included"
" when appropriate (usually on failure)"
),
"type": "string",
},
"expires_at": {
"description": _(
"Datetime when this resource would be subject to removal"
),
"type": ["null", "string"],
},
"created_at": {
"description": _("Datetime when this resource was created"),
"type": "string",
},
"updated_at": {
"description": _("Datetime when this resource was updated"),
"type": "string",
},
'self': {'type': 'string'},
'schema': {'type': 'string'},
}
_TASK_SCHEMA = dict(
name='Task',
properties=_TASK_PROPERTIES,
additionalProperties=False,
)
class TestMemoryCache(base.TestCase):
def setUp(self):
super(TestMemoryCache, self).setUp(
cloud_config_fixture='clouds_cache.yaml'
)
def _compare_images(self, exp, real):
self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False),
)
def _compare_volumes(self, exp, real):
self.assertDictEqual(
_volume.Volume(**exp).to_dict(computed=False),
real.to_dict(computed=False),
)
def test_openstack_cloud(self):
self.assertIsInstance(self.cloud, openstack.connection.Connection)
def _compare_projects(self, exp, real):
self.assertDictEqual(
_project.Project(**exp).to_dict(computed=False),
real.to_dict(computed=False),
)
def _compare_users(self, exp, real):
self.assertDictEqual(
_user.User(**exp).to_dict(computed=False),
real.to_dict(computed=False),
)
def test_list_projects_v3(self):
project_one = self._get_project_data()
project_two = self._get_project_data()
project_list = [project_one, project_two]
first_response = {'projects': [project_one.json_response['project']]}
second_response = {
'projects': [p.json_response['project'] for p in project_list]
}
mock_uri = self.get_mock_url(
service_type='identity', resource='projects', base_url_append='v3'
)
self.register_uris(
[
dict(
method='GET',
uri=mock_uri,
status_code=200,
json=first_response,
),
dict(
method='GET',
uri=mock_uri,
status_code=200,
json=second_response,
),
]
)
for a, b in zip(
first_response['projects'], self.cloud.list_projects()
):
self._compare_projects(a, b)
# invalidate the list_projects cache
self.cloud.list_projects.invalidate(self.cloud)
for a, b in zip(
second_response['projects'], self.cloud.list_projects()
):
self._compare_projects(a, b)
self.assert_calls()
def test_list_volumes(self):
fake_volume = fakes.FakeVolume(
'volume1', 'available', 'Volume 1 Display Name'
)
fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume(
'volume2', 'available', 'Volume 2 Display Name'
)
fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris(
[
self.get_cinder_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict]},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict, fake_volume2_dict]},
),
]
)
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b)
# this call should hit the cache
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b)
self.cloud.list_volumes.invalidate(self.cloud)
for a, b in zip(
[fake_volume_dict, fake_volume2_dict], self.cloud.list_volumes()
):
self._compare_volumes(a, b)
self.assert_calls()
def test_list_volumes_creating_invalidates(self):
fake_volume = fakes.FakeVolume(
'volume1', 'creating', 'Volume 1 Display Name'
)
fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume(
'volume2', 'available', 'Volume 2 Display Name'
)
fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris(
[
self.get_cinder_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict]},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volume_dict, fake_volume2_dict]},
),
]
)
for a, b in zip([fake_volume_dict], self.cloud.list_volumes()):
self._compare_volumes(a, b)
for a, b in zip(
[fake_volume_dict, fake_volume2_dict], self.cloud.list_volumes()
):
self._compare_volumes(a, b)
self.assert_calls()
def test_create_volume_invalidates(self):
fake_volb4 = meta.obj_to_munch(
fakes.FakeVolume('volume1', 'available', '')
)
_id = '12345'
fake_vol_creating = meta.obj_to_munch(
fakes.FakeVolume(_id, 'creating', '')
)
fake_vol_avail = meta.obj_to_munch(
fakes.FakeVolume(_id, 'available', '')
)
def now_deleting(request, context):
fake_vol_avail['status'] = 'deleting'
self.register_uris(
[
self.get_cinder_discovery_mock_dict(),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4]},
),
dict(
method='POST',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes']
),
json={'volume': fake_vol_creating},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_creating},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_avail},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4, fake_vol_avail]},
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json={'volume': fake_vol_avail},
),
dict(
method='DELETE',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
json=now_deleting,
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', _id]
),
status_code=404,
),
dict(
method='GET',
uri=self.get_mock_url(
'volumev3', 'public', append=['volumes', 'detail']
),
json={'volumes': [fake_volb4, fake_vol_avail]},
),
]
)
for a, b in zip([fake_volb4], self.cloud.list_volumes()):
self._compare_volumes(a, b)
volume = dict(
display_name='junk_vol',
size=1,
display_description='test junk volume',
)
self.cloud.create_volume(wait=True, timeout=2, **volume)
# If cache was not invalidated, we would not see our own volume here
# because the first volume was available and thus would already be
# cached.
for a, b in zip(
[fake_volb4, fake_vol_avail], self.cloud.list_volumes()
):
self._compare_volumes(a, b)
self.cloud.delete_volume(_id)
# And now delete and check same thing since list is cached as all
# available
for a, b in zip([fake_volb4], self.cloud.list_volumes()):
self._compare_volumes(a, b)
self.assert_calls()
def test_list_users(self):
user_data = self._get_user_data(email='test@example.com')
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
service_type='identity',
resource='users',
base_url_append='v3',
),
status_code=200,
json={'users': [user_data.json_response['user']]},
)
]
)
users = self.cloud.list_users()
self.assertEqual(1, len(users))
self.assertEqual(user_data.user_id, users[0]['id'])
self.assertEqual(user_data.name, users[0]['name'])
self.assertEqual(user_data.email, users[0]['email'])
self.assert_calls()
def test_modify_user_invalidates_cache(self):
self.use_keystone_v2()
user_data = self._get_user_data(email='test@example.com')
new_resp = {'user': user_data.json_response['user'].copy()}
new_resp['user']['email'] = 'Nope@Nope.Nope'
new_req = {'user': {'email': new_resp['user']['email']}}
mock_users_url = self.get_mock_url(
service_type='identity', interface='admin', resource='users'
)
mock_user_resource_url = self.get_mock_url(
service_type='identity',
interface='admin',
resource='users',
append=[user_data.user_id],
)
empty_user_list_resp = {'users': []}
users_list_resp = {'users': [user_data.json_response['user']]}
updated_users_list_resp = {'users': [new_resp['user']]}
# Password is None in the original create below
del user_data.json_request['user']['password']
uris_to_mock = [
# Initial user list is empty
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=empty_user_list_resp,
),
# POST to create the user
# GET to get the user data after POST
dict(
method='POST',
uri=mock_users_url,
status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request),
),
# List Users Call
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=users_list_resp,
),
# List users to get ID for update
# Get user using user_id from list
# Update user
# Get updated user
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=users_list_resp,
),
dict(
method='PUT',
uri=mock_user_resource_url,
status_code=200,
json=new_resp,
validate=dict(json=new_req),
),
# List Users Call
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=updated_users_list_resp,
),
# List User to get ID for delete
# delete user
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=updated_users_list_resp,
),
dict(method='DELETE', uri=mock_user_resource_url, status_code=204),
# List Users Call (empty post delete)
dict(
method='GET',
uri=mock_users_url,
status_code=200,
json=empty_user_list_resp,
),
]
self.register_uris(uris_to_mock)
# first cache an empty list
self.assertEqual([], self.cloud.list_users())
# now add one
created = self.cloud.create_user(
name=user_data.name, email=user_data.email
)
self.assertEqual(user_data.user_id, created['id'])
self.assertEqual(user_data.name, created['name'])
self.assertEqual(user_data.email, created['email'])
# Cache should have been invalidated
users = self.cloud.list_users()
self.assertEqual(user_data.user_id, users[0]['id'])
self.assertEqual(user_data.name, users[0]['name'])
self.assertEqual(user_data.email, users[0]['email'])
# Update and check to see if it is updated
updated = self.cloud.update_user(
user_data.user_id, email=new_resp['user']['email']
)
self.assertEqual(user_data.user_id, updated.id)
self.assertEqual(user_data.name, updated.name)
self.assertEqual(new_resp['user']['email'], updated.email)
users = self.cloud.list_users()
self.assertEqual(1, len(users))
self.assertEqual(user_data.user_id, users[0]['id'])
self.assertEqual(user_data.name, users[0]['name'])
self.assertEqual(new_resp['user']['email'], users[0]['email'])
# Now delete and ensure it disappears
self.cloud.delete_user(user_data.user_id)
self.assertEqual([], self.cloud.list_users())
self.assert_calls()
def test_list_flavors(self):
mock_uri = '{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
)
flavors = list(_fakes.generate_fake_resources(_flavor.Flavor, count=2))
uris_to_mock = [
dict(
method='GET',
uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}
),
json={'flavors': []},
),
dict(
method='GET',
uri=mock_uri,
validate=dict(
headers={'OpenStack-API-Version': 'compute 2.53'}
),
json={'flavors': flavors},
),
]
self.use_compute_discovery()
self.register_uris(uris_to_mock)
self.assertEqual([], self.cloud.list_flavors())
self.assertEqual([], self.cloud.list_flavors())
self.cloud.list_flavors.invalidate(self.cloud)
self.assertResourceListEqual(
self.cloud.list_flavors(), flavors, _flavor.Flavor
)
self.assert_calls()
def test_list_images(self):
self.use_glance()
fake_image = fakes.make_fake_image(image_id='42')
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'image', 'public', append=['v2', 'images']
),
json={'images': []},
),
dict(
method='GET',
uri=self.get_mock_url(
'image', 'public', append=['v2', 'images']
),
json={'images': [fake_image]},
),
]
)
self.assertEqual([], self.cloud.list_images())
self.assertEqual([], self.cloud.list_images())
self.cloud.list_images.invalidate(self.cloud)
[
self._compare_images(a, b)
for a, b in zip([fake_image], self.cloud.list_images())
]
self.assert_calls()
def test_list_images_caches_deleted_status(self):
self.use_glance()
deleted_image_id = self.getUniqueString()
deleted_image = fakes.make_fake_image(
image_id=deleted_image_id, status='deleted'
)
active_image_id = self.getUniqueString()
active_image = fakes.make_fake_image(image_id=active_image_id)
list_return = {'images': [active_image, deleted_image]}
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=list_return,
),
]
)
[
self._compare_images(a, b)
for a, b in zip([active_image], self.cloud.list_images())
]
[
self._compare_images(a, b)
for a, b in zip([active_image], self.cloud.list_images())
]
# We should only have one call
self.assert_calls()
def test_cache_no_cloud_name(self):
self.use_glance()
self.cloud.name = None
fi = fakes.make_fake_image(image_id=self.getUniqueString())
fi2 = fakes.make_fake_image(image_id=self.getUniqueString())
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi]},
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi, fi2]},
),
]
)
[
self._compare_images(a, b)
for a, b in zip([fi], self.cloud.list_images())
]
# Now test that the list was cached
[
self._compare_images(a, b)
for a, b in zip([fi], self.cloud.list_images())
]
# Invalidation too
self.cloud.list_images.invalidate(self.cloud)
[
self._compare_images(a, b)
for a, b in zip([fi, fi2], self.cloud.list_images())
]
def test_list_ports_filtered(self):
down_port = test_port.TestPort.mock_neutron_port_create_rep['port']
active_port = down_port.copy()
active_port['status'] = 'ACTIVE'
# We're testing to make sure a query string is passed when we're
# caching (cache by url), and that the results are still filtered.
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['status=DOWN'],
),
json={
'ports': [
down_port,
active_port,
]
},
),
]
)
ports = self.cloud.list_ports(filters={'status': 'DOWN'})
for a, b in zip([down_port], ports):
self.assertDictEqual(
_port.Port(**a).to_dict(computed=False),
b.to_dict(computed=False),
)
self.assert_calls()
class TestCacheIgnoresQueuedStatus(base.TestCase):
scenarios = [
('queued', dict(status='queued')),
('saving', dict(status='saving')),
('pending_delete', dict(status='pending_delete')),
]
def setUp(self):
super(TestCacheIgnoresQueuedStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml'
)
self.use_glance()
active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status
)
self.active_list_return = {'images': [self.active_image]}
steady_image_id = self.getUniqueString()
self.steady_image = fakes.make_fake_image(image_id=steady_image_id)
self.steady_list_return = {
'images': [self.active_image, self.steady_image]
}
def _compare_images(self, exp, real):
self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False),
)
def test_list_images_ignores_pending_status(self):
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return,
),
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.steady_list_return,
),
]
)
[
self._compare_images(a, b)
for a, b in zip([self.active_image], self.cloud.list_images())
]
# Should expect steady_image to appear if active wasn't cached
[
self._compare_images(a, b)
for a, b in zip(
[self.active_image, self.steady_image],
self.cloud.list_images(),
)
]
class TestCacheSteadyStatus(base.TestCase):
scenarios = [
('active', dict(status='active')),
('killed', dict(status='killed')),
]
def setUp(self):
super(TestCacheSteadyStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml'
)
self.use_glance()
active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status
)
self.active_list_return = {'images': [self.active_image]}
def _compare_images(self, exp, real):
self.assertDictEqual(
_image.Image(**exp).to_dict(computed=False),
real.to_dict(computed=False),
)
def test_list_images_caches_steady_status(self):
self.register_uris(
[
dict(
method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return,
),
]
)
[
self._compare_images(a, b)
for a, b in zip([self.active_image], self.cloud.list_images())
]
[
self._compare_images(a, b)
for a, b in zip([self.active_image], self.cloud.list_images())
]
# We should only have one call
self.assert_calls()
class TestBogusAuth(base.TestCase):
def setUp(self):
super(TestBogusAuth, self).setUp(
cloud_config_fixture='clouds_cache.yaml'
)
def test_get_auth_bogus(self):
with testtools.ExpectedException(exceptions.ConfigException):
openstack.connect(cloud='_bogus_test_', config=self.config)

View File

@@ -0,0 +1,7 @@
---
upgrade:
- |
The cloud-layer caching functionality has been removed in favour of the
proxy-layer caching functionality first introduced in openstacksdk 1.0.0.
This migration to proxy-layer caching was designed to be transparent to
end-users and there should be no user-facing impact from this removal.
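For deployments that want the new behaviour, caching is configured once in clouds.yaml and honoured at the proxy layer. A minimal sketch of such a configuration, assuming the dogpile.cache in-memory backend; the expiration values and the per-resource key are illustrative, not prescriptive:

# clouds.yaml (sketch)
cache:
  class: dogpile.cache.memory
  expiration_time: 3600      # default TTL, in seconds
  expiration:
    compute.flavor: 86400    # illustrative per-resource override
clouds:
  example:
    auth:
      auth_url: https://identity.example.com/v3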