Collapse OpenStackSDKAdapter into Proxy

We have two subclasses of keystoneauth1.adapter.Adapter. We never use
OpenStackSDKAdapter directly for anything. Collapse it into Proxy.

Change-Id: Ia0034348d80804e31867349b0939b37fb2b8f21f
This commit is contained in:
Monty Taylor 2019-03-09 13:47:53 +00:00
parent 47afa68392
commit f9b0911166
11 changed files with 239 additions and 297 deletions

View File

@ -63,7 +63,7 @@ Returned Resources
------------------
Complex objects returned to the caller must be a `munch.Munch` type. The
`openstack._adapter.ShadeAdapter` class makes resources into `munch.Munch`.
`openstack.proxy._ShadeAdapter` class makes resources into `munch.Munch`.
All objects should be normalized. It is shade's purpose in life to make
OpenStack consistent for end users, and this means not trusting the clouds

View File

@ -56,18 +56,12 @@ Each service implements a ``Proxy`` class based on
service's ``Proxy`` exists in ``openstack/compute/v2/_proxy.py``.
The :class:`~openstack.proxy.Proxy` class is based on
:class:`~openstack._adapter.OpenStackSDKAdapter` which is in turn based on
:class:`~keystoneauth1.adapter.Adapter`.
.. autoclass:: openstack.proxy.Proxy
:members:
:show-inheritance:
.. autoclass:: openstack._adapter.OpenStackSDKAdapter
:members:
:inherited-members:
:show-inheritance:
Each service's ``Proxy`` provides a higher-level interface for users to work
with via a :class:`~openstack.connection.Connection` instance.

View File

@ -1,139 +0,0 @@
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Wrapper around keystoneauth Adapter to wrap calls in TaskManager '''
try:
import simplejson
JSONDecodeError = simplejson.scanner.JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from six.moves import urllib
from keystoneauth1 import adapter
from openstack import exceptions
def _extract_name(url, service_type=None):
'''Produce a key name to use in logging/metrics from the URL path.
We want to be able to logic/metric sane general things, so we pull
the url apart to generate names. The function returns a list because
there are two different ways in which the elements want to be combined
below (one for logging, one for statsd)
Some examples are likely useful:
/servers -> ['servers']
/servers/{id} -> ['servers']
/servers/{id}/os-security-groups -> ['servers', 'os-security-groups']
/v2.0/networks.json -> ['networks']
'''
url_path = urllib.parse.urlparse(url).path.strip()
# Remove / from the beginning to keep the list indexes of interesting
# things consistent
if url_path.startswith('/'):
url_path = url_path[1:]
# Special case for neutron, which puts .json on the end of urls
if url_path.endswith('.json'):
url_path = url_path[:-len('.json')]
url_parts = url_path.split('/')
if url_parts[-1] == 'detail':
# Special case detail calls
# GET /servers/detail
# returns ['servers', 'detail']
name_parts = url_parts[-2:]
else:
# Strip leading version piece so that
# GET /v2.0/networks
# returns ['networks']
if url_parts[0] in ('v1', 'v2', 'v2.0'):
url_parts = url_parts[1:]
name_parts = []
# Pull out every other URL portion - so that
# GET /servers/{id}/os-security-groups
# returns ['servers', 'os-security-groups']
for idx in range(0, len(url_parts)):
if not idx % 2 and url_parts[idx]:
name_parts.append(url_parts[idx])
# Keystone Token fetching is a special case, so we name it "tokens"
if url_path.endswith('tokens'):
name_parts = ['tokens']
# Getting the root of an endpoint is doing version discovery
if not name_parts:
if service_type == 'object-store':
name_parts = ['account']
else:
name_parts = ['discovery']
# Strip out anything that's empty or None
return [part for part in name_parts if part]
def _json_response(response, result_key=None, error_message=None):
    """Temporary method to use to bridge from ShadeAdapter to SDK calls.

    Raises an SDK exception for HTTP error responses, then returns the
    decoded JSON body when the response carries one, and the raw
    response object otherwise.

    :param response: A ``requests.Response``-like object.
    :param result_key: Unused; kept for call-site compatibility.
    :param error_message: Message passed to ``raise_from_response`` for
        error responses.
    """
    exceptions.raise_from_response(response, error_message=error_message)

    if not response.content:
        # This doesn't have any content
        return response

    # Some REST calls do not return json content. Don't decode it.
    # Default to '' so a response with no Content-Type header at all
    # does not raise TypeError from the 'in' membership test.
    if 'application/json' not in response.headers.get('Content-Type', ''):
        return response

    try:
        result_json = response.json()
    except JSONDecodeError:
        return response
    return result_json
class OpenStackSDKAdapter(adapter.Adapter):
    """Wrapper around keystoneauth1.adapter.Adapter."""

    def __init__(
            self, session=None,
            *args, **kwargs):
        # Pure pass-through to the keystoneauth Adapter constructor;
        # session is named explicitly only to pin it as a keyword.
        super(OpenStackSDKAdapter, self).__init__(
            session=session, *args, **kwargs)

    def request(
            self, url, method, error_message=None,
            raise_exc=False, connect_retries=1, *args, **kwargs):
        # NOTE(review): error_message, raise_exc and *args are accepted
        # for call-site compatibility but deliberately NOT forwarded:
        # raise_exc is hard-coded to False so the SDK can turn HTTP
        # errors into its own exceptions downstream (see _json_response
        # in this module). Presumably the dropped *args is intentional
        # for the same reason — confirm before reusing this signature.
        response = super(OpenStackSDKAdapter, self).request(
            url, method,
            connect_retries=connect_retries, raise_exc=False,
            **kwargs)
        return response

    def _version_matches(self, version):
        # True only when a major API version was negotiated and its
        # major number equals *version*; False when discovery found
        # nothing.
        api_version = self.get_api_major_version()
        if api_version:
            return api_version[0] == version
        return False
class ShadeAdapter(OpenStackSDKAdapter):
    """Wrapper for shade methods that expect json unpacking."""

    def request(self, url, method, error_message=None, **kwargs):
        # Perform the HTTP call via the parent adapter, then hand the
        # result to _json_response, which decodes a JSON body (or
        # returns the raw response when there is nothing to decode).
        raw = super(ShadeAdapter, self).request(url, method, **kwargs)
        return _json_response(raw, error_message=error_message)

View File

@ -38,7 +38,6 @@ import requestsexceptions
import keystoneauth1.exceptions
import keystoneauth1.session
from openstack import _adapter
from openstack import _log
from openstack import exceptions
from openstack.cloud import exc
@ -49,6 +48,7 @@ from openstack.cloud import meta
from openstack.cloud import _utils
import openstack.config
import openstack.config.defaults
from openstack import proxy
from openstack import utils
DEFAULT_SERVER_AGE = 5
@ -444,7 +444,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
request_min_version = config_version
request_max_version = '{version}.latest'.format(
version=config_major)
adapter = _adapter.ShadeAdapter(
adapter = proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
@ -456,7 +456,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if adapter.get_endpoint():
return adapter
adapter = _adapter.ShadeAdapter(
adapter = proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
@ -494,7 +494,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
# object.
def _get_raw_client(
self, service_type, api_version=None, endpoint_override=None):
return _adapter.ShadeAdapter(
return proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
@ -802,7 +802,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
to fail.
"""
if isinstance(data, requests.models.Response):
data = _adapter._json_response(data)
data = proxy._json_response(data)
return meta.get_and_munchify(key, data)
@_utils.cache_on_arguments()
@ -1416,7 +1416,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
@_utils.cache_on_arguments()
def _nova_extensions(self):
extensions = set()
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/extensions'),
error_message="Error fetching extension list for nova")
@ -1435,7 +1435,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
def _neutron_extensions(self):
extensions = set()
resp = self.network.get('/extensions.json')
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching extension list for neutron")
for extension in self._get_and_munchify('extensions', data):
@ -1633,7 +1633,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A list of ``munch.Munch`` containing keypair info.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-keypairs'),
error_message="Error fetching keypair list")
return self._normalize_keypairs([
@ -1663,7 +1663,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if not filters:
filters = {}
resp = self.network.get("/routers.json", params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching router list")
return self._get_and_munchify('routers', data)
@ -1716,7 +1716,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
def _list_ports(self, filters):
resp = self.network.get("/ports.json", params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching port list")
return self._get_and_munchify('ports', data)
@ -1736,7 +1736,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if not filters:
filters = {}
resp = self.network.get("/qos/rule-types.json", params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS rule types list")
return self._get_and_munchify('rule_types', data)
@ -1761,7 +1761,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
resp = self.network.get(
"/qos/rule-types/{rule_type}.json".format(rule_type=rule_type))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS details of {rule_type} "
"rule type".format(rule_type=rule_type))
@ -1781,7 +1781,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if not filters:
filters = {}
resp = self.network.get("/qos/policies.json", params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS policies list")
return self._get_and_munchify('policies', data)
@ -1867,7 +1867,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
list could not be fetched.
"""
try:
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-availability-zone'))
except exc.OpenStackCloudHTTPError:
self.log.debug(
@ -1892,7 +1892,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A list of flavor ``munch.Munch``.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(
'/flavors/detail', params=dict(is_public='None')),
error_message="Error fetching flavor list")
@ -1904,7 +1904,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
endpoint = "/flavors/{id}/os-extra_specs".format(
id=flavor.id)
try:
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(endpoint),
error_message="Error fetching flavor extra specs")
flavor.extra_specs = self._get_and_munchify(
@ -1941,7 +1941,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if not self._has_secgroups():
return []
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(
'/servers/{server_id}/os-security-groups'.format(
server_id=server['id'])))
@ -1998,7 +1998,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
return False
for sg in security_groups:
_adapter._json_response(self.compute.post(
proxy._json_response(self.compute.post(
'/servers/%s/action' % server['id'],
json={'addSecurityGroup': {'name': sg.name}}))
@ -2026,7 +2026,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
for sg in security_groups:
try:
_adapter._json_response(self.compute.post(
proxy._json_response(self.compute.post(
'/servers/%s/action' % server['id'],
json={'removeSecurityGroup': {'name': sg.name}}))
@ -2062,7 +2062,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if self._use_neutron_secgroups():
# Neutron returns dicts, so no need to convert objects here.
resp = self.network.get('/security-groups.json', params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching security group list")
return self._normalize_secgroups(
@ -2070,7 +2070,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
# Handle nova security groups
else:
data = _adapter._json_response(self.compute.get(
data = proxy._json_response(self.compute.get(
'/os-security-groups', params=filters))
return self._normalize_secgroups(
self._get_and_munchify('security_groups', data))
@ -2147,7 +2147,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A list of server group dicts.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-server-groups'),
error_message="Error fetching server group list")
return self._get_and_munchify('server_groups', data)
@ -2174,7 +2174,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
error_msg = "{msg} for the project: {project} ".format(
msg=error_msg, project=name_or_id)
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/limits', params=params))
limits = self._get_and_munchify('limits', data)
return self._normalize_compute_limits(limits, project_id=project_id)
@ -2209,7 +2209,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
except keystoneauth1.exceptions.catalog.EndpointNotFound:
# We didn't have glance, let's try nova
# If this doesn't work - we just let the exception propagate
response = _adapter._json_response(
response = proxy._json_response(
self.compute.get('/images/detail'))
while 'next' in response:
image_list.extend(meta.obj_list_to_munch(response['images']))
@ -2250,7 +2250,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudUnavailableExtension(
'Floating IP pools extension is not available on target cloud')
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('os-floating-ip-pools'),
error_message="Error fetching floating IP pool list")
pools = self._get_and_munchify('floating_ip_pools', data)
@ -2344,7 +2344,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
def _nova_list_floating_ips(self):
try:
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-floating-ips'))
except exc.OpenStackCloudURINotFound:
return []
@ -2746,7 +2746,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A network ``munch.Munch``.
"""
resp = self.network.get('/networks/{id}'.format(id=id))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error getting network with ID {id}".format(id=id)
)
@ -2807,7 +2807,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A subnet ``munch.Munch``.
"""
resp = self.network.get('/subnets/{id}'.format(id=id))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error getting subnet with ID {id}".format(id=id)
)
@ -2846,7 +2846,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A port ``munch.Munch``.
"""
resp = self.network.get('/ports/{id}'.format(id=id))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error getting port with ID {id}".format(id=id)
)
@ -2985,7 +2985,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
specs.
:returns: A flavor ``munch.Munch``.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/flavors/{id}'.format(id=id)),
error_message="Error getting flavor with ID {id}".format(id=id)
)
@ -2996,7 +2996,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
endpoint = "/flavors/{id}/os-extra_specs".format(
id=flavor.id)
try:
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(endpoint),
error_message="Error fetching flavor extra specs")
flavor.extra_specs = self._get_and_munchify(
@ -3049,9 +3049,9 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
" ID {id}".format(id=id))
if self._use_neutron_secgroups():
resp = self.network.get('/security-groups/{id}'.format(id=id))
data = _adapter._json_response(resp, error_message=error_message)
data = proxy._json_response(resp, error_message=error_message)
else:
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(
'/os-security-groups/{id}'.format(id=id)),
error_message=error_message)
@ -3085,7 +3085,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
return ""
def _get_server_console_output(self, server_id, length=None):
data = _adapter._json_response(self.compute.post(
data = proxy._json_response(self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id),
json={'os-getConsoleOutput': {'length': length}}))
return self._get_and_munchify('output', data)
@ -3138,7 +3138,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
return meta.add_server_interfaces(self, server)
def get_server_by_id(self, id):
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/servers/{id}'.format(id=id)))
server = self._get_and_munchify('server', data)
return meta.add_server_interfaces(self, self._normalize_server(server))
@ -3291,13 +3291,13 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
error_message = "Error getting floating ip with ID {id}".format(id=id)
if self._use_neutron_floating():
data = _adapter._json_response(
data = proxy._json_response(
self.network.get('/floatingips/{id}'.format(id=id)),
error_message=error_message)
return self._normalize_floating_ip(
self._get_and_munchify('floatingip', data))
else:
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-floating-ips/{id}'.format(id=id)),
error_message=error_message)
return self._normalize_floating_ip(
@ -3353,7 +3353,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
}
if public_key:
keypair['public_key'] = public_key
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/os-keypairs',
json={'keypair': keypair}),
@ -3371,7 +3371,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudException on operation error.
"""
try:
_adapter._json_response(self.compute.delete(
proxy._json_response(self.compute.delete(
'/os-keypairs/{name}'.format(name=name)))
except exc.OpenStackCloudURINotFound:
self.log.debug("Keypair %s not found for deleting", name)
@ -3517,7 +3517,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudException(
"Network %s not found." % name_or_id)
data = _adapter._json_response(self.network.put(
data = proxy._json_response(self.network.put(
"/networks/{net_id}.json".format(net_id=network.id),
json={"network": kwargs}),
error_message="Error updating network {0}".format(name_or_id))
@ -3694,7 +3694,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"/qos/policies/{policy_id}/bandwidth_limit_rules.json".format(
policy_id=policy['id']),
params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS bandwidth limit rules from "
"{policy}".format(policy=policy['id']))
@ -3724,7 +3724,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
resp = self.network.get(
"/qos/policies/{policy_id}/bandwidth_limit_rules/{rule_id}.json".
format(policy_id=policy['id'], rule_id=rule_id))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS bandwidth limit rule {rule_id} "
"from {policy}".format(rule_id=rule_id,
@ -3903,7 +3903,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"/qos/policies/{policy_id}/dscp_marking_rules.json".format(
policy_id=policy['id']),
params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS DSCP marking rules from "
"{policy}".format(policy=policy['id']))
@ -3933,7 +3933,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
resp = self.network.get(
"/qos/policies/{policy_id}/dscp_marking_rules/{rule_id}.json".
format(policy_id=policy['id'], rule_id=rule_id))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS DSCP marking rule {rule_id} "
"from {policy}".format(rule_id=rule_id,
@ -4092,7 +4092,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"/qos/policies/{policy_id}/minimum_bandwidth_rules.json".format(
policy_id=policy['id']),
params=filters)
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS minimum bandwidth rules from "
"{policy}".format(policy=policy['id']))
@ -4122,7 +4122,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
resp = self.network.get(
"/qos/policies/{policy_id}/minimum_bandwidth_rules/{rule_id}.json".
format(policy_id=policy['id'], rule_id=rule_id))
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error fetching QoS minimum_bandwidth rule {rule_id}"
" from {policy}".format(rule_id=rule_id,
@ -4271,7 +4271,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if port_id:
json_body['port_id'] = port_id
return _adapter._json_response(
return proxy._json_response(
self.network.put(
"/routers/{router_id}/add_router_interface.json".format(
router_id=router['id']),
@ -4395,7 +4395,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
'target cloud')
router['availability_zone_hints'] = availability_zone_hints
data = _adapter._json_response(
data = proxy._json_response(
self.network.post("/routers.json", json={"router": router}),
error_message="Error creating router {0}".format(name))
return self._get_and_munchify('router', data)
@ -4464,7 +4464,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
resp = self.network.put(
"/routers/{router_id}.json".format(router_id=curr_router['id']),
json={"router": router})
data = _adapter._json_response(
data = proxy._json_response(
resp,
error_message="Error updating router {0}".format(name_or_id))
return self._get_and_munchify('router', data)
@ -4542,7 +4542,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"Server {server} could not be found and therefore"
" could not be snapshotted.".format(server=server))
server = server_obj
response = _adapter._json_response(
response = proxy._json_response(
self.compute.post(
'/servers/{server_id}/action'.format(server_id=server['id']),
json={
@ -4998,7 +4998,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudException on operation error.
"""
_adapter._json_response(self.compute.delete(
proxy._json_response(self.compute.delete(
'/servers/{server_id}/os-volume_attachments/{volume_id}'.format(
server_id=server['id'], volume_id=volume['id'])),
error_message=(
@ -5065,7 +5065,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
payload = {'volumeId': volume['id']}
if device:
payload['device'] = device
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/servers/{server_id}/os-volume_attachments'.format(
server_id=server['id']),
@ -5713,11 +5713,11 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"unable to find a floating ip pool")
pool = pools[0]['name']
data = _adapter._json_response(self.compute.post(
data = proxy._json_response(self.compute.post(
'/os-floating-ips', json=dict(pool=pool)))
pool_ip = self._get_and_munchify('floating_ip', data)
# TODO(mordred) Remove this - it's just for compat
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-floating-ips/{id}'.format(
id=pool_ip['id'])))
return self._get_and_munchify('floating_ip', data)
@ -5774,7 +5774,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
def _neutron_delete_floating_ip(self, floating_ip_id):
try:
_adapter._json_response(self.network.delete(
proxy._json_response(self.network.delete(
"/floatingips/{fip_id}.json".format(fip_id=floating_ip_id),
error_message="unable to delete floating IP"))
except exc.OpenStackCloudResourceNotFound:
@ -5787,7 +5787,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
def _nova_delete_floating_ip(self, floating_ip_id):
try:
_adapter._json_response(
proxy._json_response(
self.compute.delete(
'/os-floating-ips/{id}'.format(id=floating_ip_id)),
error_message='Unable to delete floating IP {fip_id}'.format(
@ -6015,7 +6015,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if fixed_address is not None:
floating_ip_args['fixed_ip_address'] = fixed_address
return _adapter._json_response(
return proxy._json_response(
self.network.put(
"/floatingips/{fip_id}.json".format(fip_id=floating_ip['id']),
json={'floatingip': floating_ip_args}),
@ -6038,7 +6038,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
}
if fixed_address:
body['fixed_address'] = fixed_address
return _adapter._json_response(
return proxy._json_response(
self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id),
json=dict(addFloatingIp=body)),
@ -6091,7 +6091,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"unable to find floating IP {0}".format(floating_ip_id))
error_message = "Error detaching IP {ip} from instance {id}".format(
ip=floating_ip_id, id=server_id)
return _adapter._json_response(
return proxy._json_response(
self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id),
json=dict(removeFloatingIp=dict(
@ -6667,7 +6667,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if 'block_device_mapping_v2' in kwargs:
endpoint = '/os-volumes_boot'
with _utils.shade_exceptions("Error in creating instance"):
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(endpoint, json=server_json))
server = self._get_and_munchify('server', data)
admin_pass = server.get('adminPass') or kwargs.get('admin_pass')
@ -6788,7 +6788,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if admin_pass:
kwargs['adminPass'] = admin_pass
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/servers/{server_id}/action'.format(server_id=server_id),
json={'rebuild': kwargs}),
@ -6838,7 +6838,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudException(
'Invalid Server {server}'.format(server=name_or_id))
_adapter._json_response(
proxy._json_response(
self.compute.post(
'/servers/{server_id}/metadata'.format(server_id=server['id']),
json={'metadata': metadata}),
@ -6862,7 +6862,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
for key in metadata_keys:
error_message = 'Error deleting metadata {key} on {server}'.format(
key=key, server=name_or_id)
_adapter._json_response(
proxy._json_response(
self.compute.delete(
'/servers/{server_id}/metadata/{key}'.format(
server_id=server['id'],
@ -6936,7 +6936,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
self._delete_server_floating_ips(server, delete_ip_retry)
try:
_adapter._json_response(
proxy._json_response(
self.compute.delete(
'/servers/{id}'.format(id=server['id'])),
error_message="Error in deleting server")
@ -7001,7 +7001,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudException(
"failed to find server '{server}'".format(server=name_or_id))
data = _adapter._json_response(
data = proxy._json_response(
self.compute.put(
'/servers/{server_id}'.format(server_id=server['id']),
json={'server': kwargs}),
@ -7020,7 +7020,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudException on operation error.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/os-server-groups',
json={
@ -7046,7 +7046,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
name_or_id)
return False
_adapter._json_response(
proxy._json_response(
self.compute.delete(
'/os-server-groups/{id}'.format(id=server_group['id'])),
error_message="Error deleting server group {name}".format(
@ -7912,7 +7912,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"""
kwargs['network_id'] = network_id
data = _adapter._json_response(
data = proxy._json_response(
self.network.post("/ports.json", json={'port': kwargs}),
error_message="Error creating port for network {0}".format(
network_id))
@ -7980,7 +7980,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudException(
"failed to find port '{port}'".format(port=name_or_id))
data = _adapter._json_response(
data = proxy._json_response(
self.network.put(
"/ports/{port_id}.json".format(port_id=port['id']),
json={"port": kwargs}),
@ -8037,13 +8037,13 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if project_id is not None:
security_group_json['security_group']['tenant_id'] = project_id
if self._use_neutron_secgroups():
data = _adapter._json_response(
data = proxy._json_response(
self.network.post(
'/security-groups.json',
json=security_group_json),
error_message="Error creating security group {0}".format(name))
else:
data = _adapter._json_response(self.compute.post(
data = proxy._json_response(self.compute.post(
'/os-security-groups', json=security_group_json))
return self._normalize_secgroup(
self._get_and_munchify('security_group', data))
@ -8084,7 +8084,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
return True
else:
_adapter._json_response(self.compute.delete(
proxy._json_response(self.compute.delete(
'/os-security-groups/{id}'.format(id=secgroup['id'])))
return True
@ -8113,7 +8113,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"Security group %s not found." % name_or_id)
if self._use_neutron_secgroups():
data = _adapter._json_response(
data = proxy._json_response(
self.network.put(
'/security-groups/{sg_id}.json'.format(sg_id=group['id']),
json={'security_group': kwargs}),
@ -8122,7 +8122,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
else:
for key in ('name', 'description'):
kwargs.setdefault(key, group[key])
data = _adapter._json_response(
data = proxy._json_response(
self.compute.put(
'/os-security-groups/{id}'.format(id=group['id']),
json={'security_group': kwargs}))
@ -8213,7 +8213,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if project_id is not None:
rule_def['tenant_id'] = project_id
data = _adapter._json_response(
data = proxy._json_response(
self.network.post(
'/security-group-rules.json',
json={'security_group_rule': rule_def}),
@ -8259,7 +8259,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if project_id is not None:
security_group_rule_dict[
'security_group_rule']['tenant_id'] = project_id
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/os-security-group-rules',
json=security_group_rule_dict
@ -10495,7 +10495,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
}
if flavorid == 'auto':
payload['id'] = None
data = _adapter._json_response(self.compute.post(
data = proxy._json_response(self.compute.post(
'/flavors',
json=dict(flavor=payload)))
@ -10517,7 +10517,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
"Flavor %s not found for deleting", name_or_id)
return False
_adapter._json_response(
proxy._json_response(
self.compute.delete(
'/flavors/{id}'.format(id=flavor['id'])),
error_message="Unable to delete flavor {name}".format(
@ -10534,7 +10534,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudException on operation error.
:raises: OpenStackCloudResourceNotFound if flavor ID is not found.
"""
_adapter._json_response(
proxy._json_response(
self.compute.post(
"/flavors/{id}/os-extra_specs".format(id=flavor_id),
json=dict(extra_specs=extra_specs)),
@ -10550,7 +10550,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudResourceNotFound if flavor ID is not found.
"""
for key in keys:
_adapter._json_response(
proxy._json_response(
self.compute.delete(
"/flavors/{id}/os-extra_specs/{key}".format(
id=flavor_id, key=key)),
@ -10566,7 +10566,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
access = {'tenant': project_id}
access_key = '{action}TenantAccess'.format(action=action)
_adapter._json_response(
proxy._json_response(
self.compute.post(endpoint, json={access_key: access}))
def add_flavor_access(self, flavor_id, project_id):
@ -10598,7 +10598,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudException on operation error.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(
'/flavors/{id}/os-flavor-access'.format(id=flavor_id)),
error_message=(
@ -10882,7 +10882,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A list of hypervisor ``munch.Munch``.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-hypervisors/detail'),
error_message="Error fetching hypervisor list")
return self._get_and_munchify('hypervisors', data)
@ -10907,7 +10907,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:returns: A list of aggregate dicts.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get('/os-aggregates'),
error_message="Error fetching aggregate list")
return self._get_and_munchify('aggregates', data)
@ -10943,7 +10943,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
:raises: OpenStackCloudException on operation error.
"""
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/os-aggregates',
json={'aggregate': {
@ -10971,7 +10971,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudException(
"Host aggregate %s not found." % name_or_id)
data = _adapter._json_response(
data = proxy._json_response(
self.compute.put(
'/os-aggregates/{id}'.format(id=aggregate['id']),
json={'aggregate': kwargs}),
@ -10993,7 +10993,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
self.log.debug("Aggregate %s not found for deleting", name_or_id)
return False
return _adapter._json_response(
return proxy._json_response(
self.compute.delete(
'/os-aggregates/{id}'.format(id=aggregate['id'])),
error_message="Error deleting aggregate {name}".format(
@ -11020,7 +11020,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
err_msg = "Unable to set metadata for host aggregate {name}".format(
name=name_or_id)
data = _adapter._json_response(
data = proxy._json_response(
self.compute.post(
'/os-aggregates/{id}/action'.format(id=aggregate['id']),
json={'set_metadata': {'metadata': metadata}}),
@ -11043,7 +11043,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
err_msg = "Unable to add host {host} to aggregate {name}".format(
host=host_name, name=name_or_id)
return _adapter._json_response(
return proxy._json_response(
self.compute.post(
'/os-aggregates/{id}/action'.format(id=aggregate['id']),
json={'add_host': {'host': host_name}}),
@ -11065,7 +11065,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
err_msg = "Unable to remove host {host} to aggregate {name}".format(
host=host_name, name=name_or_id)
return _adapter._json_response(
return proxy._json_response(
self.compute.post(
'/os-aggregates/{id}/action'.format(id=aggregate['id']),
json={'remove_host': {'host': host_name}}),
@ -11157,7 +11157,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
# if key in quota.VOLUME_QUOTAS}
kwargs['force'] = True
_adapter._json_response(
proxy._json_response(
self.compute.put(
'/os-quota-sets/{project}'.format(project=proj.id),
json={'quota_set': kwargs}),
@ -11174,7 +11174,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
proj = self.get_project(name_or_id)
if not proj:
raise exc.OpenStackCloudException("project does not exist")
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(
'/os-quota-sets/{project}'.format(project=proj.id)))
return self._get_and_munchify('quota_set', data)
@ -11191,7 +11191,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
proj = self.get_project(name_or_id)
if not proj:
raise exc.OpenStackCloudException("project does not exist")
return _adapter._json_response(
return proxy._json_response(
self.compute.delete(
'/os-quota-sets/{project}'.format(project=proj.id)))
@ -11251,7 +11251,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
raise exc.OpenStackCloudException(
"project does not exist: {}".format(name=proj.id))
data = _adapter._json_response(
data = proxy._json_response(
self.compute.get(
'/os-simple-tenant-usage/{project}'.format(project=proj.id),
params=dict(start=start.isoformat(), end=end.isoformat())),
@ -11352,7 +11352,7 @@ class _OpenStackCloudMixin(_normalize.Normalizer):
if details:
url = url + "/details"
url = url + ".json"
data = _adapter._json_response(
data = proxy._json_response(
self.network.get(url),
error_message=("Error fetching Neutron's quota for "
"project {0}".format(proj.id)))

View File

@ -22,12 +22,12 @@ import os_service_types
import requestsexceptions
from six.moves import urllib
from openstack import _adapter
from openstack import version as openstack_version
from openstack import _log
from openstack.config import _util
from openstack.config import defaults as config_defaults
from openstack import exceptions
from openstack import proxy
def _make_key(key, service_type):
@ -450,7 +450,7 @@ class CloudRegion(object):
def get_session_client(
self, service_type, version=None,
constructor=_adapter.OpenStackSDKAdapter,
constructor=proxy.Proxy,
**kwargs):
"""Return a prepped keystoneauth Adapter for a given service.

View File

@ -18,7 +18,6 @@ from openstack.object_store.v1 import account as _account
from openstack.object_store.v1 import container as _container
from openstack.object_store.v1 import obj as _obj
from openstack.object_store.v1 import info as _info
from openstack import _adapter
from openstack import exceptions
from openstack import _log
from openstack import proxy
@ -568,7 +567,7 @@ class Proxy(proxy.Proxy):
def _upload_object(self, endpoint, filename, headers):
with open(filename, 'rb') as dt:
return _adapter._json_response(self.put(
return proxy._json_response(self.put(
endpoint, headers=headers, data=dt))
def _get_file_segments(self, endpoint, filename, file_size, segment_size):

View File

@ -10,11 +10,80 @@
# License for the specific language governing permissions and limitations
# under the License.
from openstack import _adapter
try:
import simplejson
JSONDecodeError = simplejson.scanner.JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from six.moves import urllib
from keystoneauth1 import adapter
from openstack import exceptions
from openstack import resource
def _extract_name(url, service_type=None):
'''Produce a key name to use in logging/metrics from the URL path.
We want to be able to logic/metric sane general things, so we pull
the url apart to generate names. The function returns a list because
there are two different ways in which the elements want to be combined
below (one for logging, one for statsd)
Some examples are likely useful:
/servers -> ['servers']
/servers/{id} -> ['servers']
/servers/{id}/os-security-groups -> ['servers', 'os-security-groups']
/v2.0/networks.json -> ['networks']
'''
url_path = urllib.parse.urlparse(url).path.strip()
# Remove / from the beginning to keep the list indexes of interesting
# things consistent
if url_path.startswith('/'):
url_path = url_path[1:]
# Special case for neutron, which puts .json on the end of urls
if url_path.endswith('.json'):
url_path = url_path[:-len('.json')]
url_parts = url_path.split('/')
if url_parts[-1] == 'detail':
# Special case detail calls
# GET /servers/detail
# returns ['servers', 'detail']
name_parts = url_parts[-2:]
else:
# Strip leading version piece so that
# GET /v2.0/networks
# returns ['networks']
if url_parts[0] in ('v1', 'v2', 'v2.0'):
url_parts = url_parts[1:]
name_parts = []
# Pull out every other URL portion - so that
# GET /servers/{id}/os-security-groups
# returns ['servers', 'os-security-groups']
for idx in range(0, len(url_parts)):
if not idx % 2 and url_parts[idx]:
name_parts.append(url_parts[idx])
# Keystone Token fetching is a special case, so we name it "tokens"
if url_path.endswith('tokens'):
name_parts = ['tokens']
# Getting the root of an endpoint is doing version discovery
if not name_parts:
if service_type == 'object-store':
name_parts = ['account']
else:
name_parts = ['discovery']
# Strip out anything that's empty or None
return [part for part in name_parts if part]
# The _check_resource decorator is used on Proxy methods to ensure that
# the `actual` argument is in fact the type of the `expected` argument.
# It does so under two cases:
@ -39,7 +108,7 @@ def _check_resource(strict=False):
return wrap
class Proxy(_adapter.OpenStackSDKAdapter):
class Proxy(adapter.Adapter):
"""Represents a service."""
retriable_status_codes = None
@ -56,6 +125,21 @@ class Proxy(_adapter.OpenStackSDKAdapter):
self.retriable_status_codes)
super(Proxy, self).__init__(*args, **kwargs)
def request(
        self, url, method, error_message=None,
        raise_exc=False, connect_retries=1, *args, **kwargs):
    # NOTE: raise_exc is accepted for signature compatibility but the
    # underlying call is always made with raise_exc=False -- error
    # handling is left to the caller (e.g. _json_response raises from
    # the response). Positional *args are likewise accepted but not
    # forwarded. error_message is unused here; the _ShadeAdapter
    # subclass consumes it when unpacking JSON.
    response = super(Proxy, self).request(
        url, method,
        connect_retries=connect_retries, raise_exc=False,
        **kwargs)
    return response
def _version_matches(self, version):
    """Return True if the endpoint's major API version equals *version*.

    Returns False when no major API version could be determined.
    """
    major_version = self.get_api_major_version()
    if not major_version:
        return False
    return major_version[0] == version
def _get_connection(self):
"""Get the Connection object associated with this Proxy.
@ -307,3 +391,30 @@ class Proxy(_adapter.OpenStackSDKAdapter):
"""
res = self._get_resource(resource_type, value, **attrs)
return res.head(self, base_path=base_path)
def _json_response(response, result_key=None, error_message=None):
    """Temporary method to use to bridge from ShadeAdapter to SDK calls.

    Raises an SDK exception if the response indicates an HTTP error,
    then returns the decoded JSON body when there is one, or the raw
    response object otherwise.

    :param response: A requests-style response object.
    :param result_key: Unused; kept for signature compatibility.
    :param error_message: Optional message passed to the raised
        exception on HTTP error.
    """
    exceptions.raise_from_response(response, error_message=error_message)

    if not response.content:
        # This doesn't have any content
        return response

    # Some REST calls do not return json content. Don't decode it.
    # Default to '' so a missing Content-Type header does not raise
    # TypeError from the `in` check.
    if 'application/json' not in response.headers.get('Content-Type', ''):
        return response

    try:
        result_json = response.json()
    except JSONDecodeError:
        # Declared JSON but the body does not parse; hand back the
        # raw response rather than failing.
        return response
    return result_json
class _ShadeAdapter(Proxy):
    """Wrapper for shade methods that expect json unpacking."""

    def request(self, url, method, error_message=None, **kwargs):
        # Delegate the HTTP call to Proxy.request, then run the result
        # through _json_response so callers get decoded JSON (or the
        # raw response for non-JSON bodies) instead of a response
        # object, raising on HTTP errors with error_message.
        response = super(_ShadeAdapter, self).request(url, method, **kwargs)
        return _json_response(response, error_message=error_message)

View File

@ -17,7 +17,6 @@ import os_service_types
from openstack import _log
from openstack import exceptions
from openstack import proxy
__all__ = [
'ServiceDescription',
@ -55,11 +54,6 @@ class ServiceDescription(object):
:param string service_type:
service_type to look for in the keystone catalog
:param proxy.Proxy proxy_class:
subclass of :class:`~openstack.proxy.Proxy` implementing
an interface for this service. Defaults to
:class:`~openstack.proxy.Proxy` which provides REST operations
but no additional features.
:param list aliases:
Optional list of aliases, if there is more than one name that might
be used to register the service in the catalog.
@ -179,7 +173,6 @@ class ServiceDescription(object):
temp_adapter = config.get_session_client(
self.service_type,
constructor=proxy.Proxy,
allow_version_hack=True,
**version_kwargs
)
@ -197,11 +190,10 @@ class ServiceDescription(object):
category=exceptions.UnsupportedServiceVersion)
return temp_adapter
proxy_class = self.supported_versions.get(str(found_version[0]))
if not proxy_class:
proxy_class = proxy.Proxy
if proxy_class:
version_kwargs['constructor'] = proxy_class
return config.get_session_client(
self.service_type,
constructor=proxy_class,
allow_version_hack=True,
**version_kwargs
)

View File

@ -25,9 +25,9 @@ import sys
from testtools import content
from openstack import _adapter
from openstack.cloud import meta
from openstack.cloud.exc import OpenStackCloudException
from openstack import proxy
from openstack.tests.functional import base
from openstack.tests.functional.cloud.util import pick_flavor
from openstack import utils
@ -175,7 +175,7 @@ class TestFloatingIP(base.BaseFunctionalTest):
self.user_cloud.list_networks())))
else:
# Find network names for nova-net
data = _adapter._json_response(
data = proxy._json_response(
self.user_cloud._conn.compute.get('/os-tenant-networks'))
nets = meta.get_and_munchify('networks', data)
self.addDetail(

View File

@ -1,38 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from openstack import _adapter
from openstack.tests.unit import base
class TestExtractName(base.TestCase):
    """Scenario-driven tests for _adapter._extract_name.

    Each scenario supplies a URL and the list of name parts that
    _extract_name is expected to derive from it; testscenarios
    injects `url` and `parts` onto the test instance per scenario.
    """

    scenarios = [
        ('slash_servers_bare', dict(url='/servers', parts=['servers'])),
        ('slash_servers_arg', dict(url='/servers/1', parts=['servers'])),
        ('servers_bare', dict(url='servers', parts=['servers'])),
        ('servers_arg', dict(url='servers/1', parts=['servers'])),
        ('networks_bare', dict(url='/v2.0/networks', parts=['networks'])),
        ('networks_arg', dict(url='/v2.0/networks/1', parts=['networks'])),
        ('tokens', dict(url='/v3/tokens', parts=['tokens'])),
        ('discovery', dict(url='/', parts=['discovery'])),
        ('secgroups', dict(
            url='/servers/1/os-security-groups',
            parts=['servers', 'os-security-groups'])),
    ]

    def test_extract_name(self):
        # self.url / self.parts come from the active scenario.
        results = _adapter._extract_name(self.url)
        self.assertEqual(self.parts, results)

View File

@ -9,6 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
import mock
import munch
@ -460,3 +461,25 @@ class TestProxyHead(base.TestCase):
connection=self.cloud, id=self.fake_id)
self.res.head.assert_called_with(self.sot, base_path=None)
self.assertEqual(rv, self.fake_result)
class TestExtractName(base.TestCase):
    """Exercise proxy._extract_name across representative URL shapes.

    testscenarios injects `url` and `parts` onto the test instance
    for each scenario tuple below.
    """

    scenarios = [
        ('slash_servers_bare', dict(url='/servers', parts=['servers'])),
        ('slash_servers_arg', dict(url='/servers/1', parts=['servers'])),
        ('servers_bare', dict(url='servers', parts=['servers'])),
        ('servers_arg', dict(url='servers/1', parts=['servers'])),
        ('networks_bare', dict(url='/v2.0/networks', parts=['networks'])),
        ('networks_arg', dict(url='/v2.0/networks/1', parts=['networks'])),
        ('tokens', dict(url='/v3/tokens', parts=['tokens'])),
        ('discovery', dict(url='/', parts=['discovery'])),
        ('secgroups', dict(
            url='/servers/1/os-security-groups',
            parts=['servers', 'os-security-groups'])),
    ]

    def test_extract_name(self):
        self.assertEqual(self.parts, proxy._extract_name(self.url))