Merge "Split OpenStackCloud into reasonable pieces"
This commit is contained in:
commit 96e513e2f5
@@ -0,0 +1,718 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import jsonpatch
import types  # noqa
import warnings

from openstack.cloud import exc
from openstack.cloud import _normalize
from openstack.cloud import _utils
from openstack import utils


class BaremetalCloudMixin(_normalize.Normalizer):
|
||||
|
||||
@property
|
||||
def _baremetal_client(self):
|
||||
if 'baremetal' not in self._raw_clients:
|
||||
client = self._get_raw_client('baremetal')
|
||||
# Do this to force version discovery. We need to do that, because
|
||||
# the endpoint-override trick we do for neutron because
|
||||
# ironicclient just appends a /v1 won't work and will break
|
||||
# keystoneauth - because ironic's versioned discovery endpoint
|
||||
# is non-compliant and doesn't return an actual version dict.
|
||||
client = self._get_versioned_client(
|
||||
'baremetal', min_version=1, max_version='1.latest')
|
||||
self._raw_clients['baremetal'] = client
|
||||
return self._raw_clients['baremetal']
|
||||
|
||||
def list_nics(self):
|
||||
"""Return a list of all bare metal ports."""
|
||||
return [nic._to_munch() for nic in self.baremetal.ports(details=True)]
|
||||
|
||||
def list_nics_for_machine(self, uuid):
|
||||
"""Returns a list of ports present on the machine node.
|
||||
|
||||
:param uuid: String representing machine UUID value in
|
||||
order to identify the machine.
|
||||
:returns: A list of ports.
|
||||
"""
|
||||
# TODO(dtantsur): support node names here.
|
||||
return [nic._to_munch()
|
||||
for nic in self.baremetal.ports(details=True, node_id=uuid)]
|
||||
|
||||
def get_nic_by_mac(self, mac):
|
||||
"""Get bare metal NIC by its hardware address (usually MAC)."""
|
||||
results = [nic._to_munch()
|
||||
for nic in self.baremetal.ports(address=mac, details=True)]
|
||||
try:
|
||||
return results[0]
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
def list_machines(self):
|
||||
"""List Machines.
|
||||
|
||||
:returns: list of ``munch.Munch`` representing machines.
|
||||
"""
|
||||
return [self._normalize_machine(node._to_munch())
|
||||
for node in self.baremetal.nodes()]
|
||||
|
||||
def get_machine(self, name_or_id):
|
||||
"""Get Machine by name or uuid
|
||||
|
||||
Search the baremetal host out by utilizing the supplied id value
|
||||
which can consist of a name or UUID.
|
||||
|
||||
:param name_or_id: A node name or UUID that will be looked up.
|
||||
|
||||
:returns: ``munch.Munch`` representing the node found or None if no
|
||||
nodes are found.
|
||||
"""
|
||||
try:
|
||||
return self._normalize_machine(
|
||||
self.baremetal.get_node(name_or_id)._to_munch())
|
||||
except exc.OpenStackCloudResourceNotFound:
|
||||
return None
|
||||
|
||||
def get_machine_by_mac(self, mac):
|
||||
"""Get machine by port MAC address
|
||||
|
||||
:param mac: Port MAC address to query in order to return a node.
|
||||
|
||||
:returns: ``munch.Munch`` representing the node found or None
|
||||
if the node is not found.
|
||||
"""
|
||||
nic = self.get_nic_by_mac(mac)
|
||||
if nic is None:
|
||||
return None
|
||||
else:
|
||||
return self.get_machine(nic['node_uuid'])
|
||||
|
||||
def inspect_machine(self, name_or_id, wait=False, timeout=3600):
|
||||
"""Inspect a Barmetal machine
|
||||
|
||||
Engages the Ironic node inspection behavior in order to collect
|
||||
metadata about the baremetal machine.
|
||||
|
||||
:param name_or_id: String representing machine name or UUID value in
|
||||
order to identify the machine.
|
||||
|
||||
:param wait: Boolean value controlling if the method is to wait for
|
||||
the desired state to be reached or a failure to occur.
|
||||
|
||||
:param timeout: Integer value, defaulting to 3600 seconds, for the
|
||||
wait state to reach completion.
|
||||
|
||||
:returns: ``munch.Munch`` representing the current state of the machine
|
||||
upon exit of the method.
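
Example (a minimal usage sketch; ``cloud`` and the node name are
illustrative assumptions, not part of this change)::

    # 'baremetal-0' is a hypothetical node name
    node = cloud.inspect_machine('baremetal-0', wait=True, timeout=1800)
    print(node['provision_state'])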
|
||||
"""
|
||||
|
||||
return_to_available = False
|
||||
|
||||
node = self.baremetal.get_node(name_or_id)
|
||||
|
||||
# NOTE(TheJulia): If in available state, we can do this. However,
|
||||
# we need to move the machine back to manageable first.
|
||||
if node.provision_state == 'available':
|
||||
if node.instance_id:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Refusing to inspect available machine %(node)s "
|
||||
"which is associated with an instance "
|
||||
"(instance_uuid %(inst)s)" %
|
||||
{'node': node.id, 'inst': node.instance_id})
|
||||
|
||||
return_to_available = True
|
||||
# NOTE(TheJulia): Changing an available machine to manageable state
# triggers a state transition, so we need to wait until that
# transition has completed.
|
||||
node = self.baremetal.set_node_provision_state(node, 'manage',
|
||||
wait=True,
|
||||
timeout=timeout)
|
||||
|
||||
if node.provision_state not in ('manageable', 'inspect failed'):
|
||||
raise exc.OpenStackCloudException(
|
||||
"Machine %(node)s must be in 'manageable', 'inspect failed' "
|
||||
"or 'available' provision state to start inspection, the "
|
||||
"current state is %(state)s" %
|
||||
{'node': node.id, 'state': node.provision_state})
|
||||
|
||||
node = self.baremetal.set_node_provision_state(node, 'inspect',
|
||||
wait=True,
|
||||
timeout=timeout)
|
||||
|
||||
if return_to_available:
|
||||
node = self.baremetal.set_node_provision_state(node, 'provide',
|
||||
wait=True,
|
||||
timeout=timeout)
|
||||
|
||||
return node._to_munch()
|
||||
|
||||
def register_machine(self, nics, wait=False, timeout=3600,
|
||||
lock_timeout=600, **kwargs):
|
||||
"""Register Baremetal with Ironic
|
||||
|
||||
Allows for the registration of Baremetal nodes with Ironic
|
||||
and population of pertinent node information or configuration
|
||||
to be passed to the Ironic API for the node.
|
||||
|
||||
This method also creates ports for a list of MAC addresses passed
|
||||
in to be utilized for boot and potentially network configuration.
|
||||
|
||||
If a failure is detected creating the network ports, any ports
|
||||
created are deleted, and the node is removed from Ironic.
|
||||
|
||||
:param nics:
|
||||
An array of MAC addresses that represent the
|
||||
network interfaces for the node to be created.
|
||||
|
||||
Example::
|
||||
|
||||
[
|
||||
{'mac': 'aa:bb:cc:dd:ee:01'},
|
||||
{'mac': 'aa:bb:cc:dd:ee:02'}
|
||||
]
|
||||
|
||||
:param wait: Boolean value, defaulting to false, to wait for the
|
||||
node to reach the available state where the node can be
|
||||
provisioned. It must be noted, when set to false, the
|
||||
method will still wait for locks to clear before sending
|
||||
the next required command.
|
||||
|
||||
:param timeout: Integer value, defaulting to 3600 seconds, for the
|
||||
wait state to reach completion.
|
||||
|
||||
:param lock_timeout: Integer value, defaulting to 600 seconds, for
|
||||
locks to clear.
|
||||
|
||||
:param kwargs: Key value pairs to be passed to the Ironic API,
|
||||
including uuid, name, chassis_uuid, driver_info,
|
||||
parameters.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: Returns a ``munch.Munch`` representing the new
|
||||
baremetal node.
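
Example (a minimal usage sketch; ``cloud``, the driver and the
driver_info values are illustrative assumptions)::

    nics = [{'mac': 'aa:bb:cc:dd:ee:01'}]
    # driver and driver_info shown here are placeholder values
    machine = cloud.register_machine(
        nics, name='compute-0', driver='ipmi',
        driver_info={'ipmi_address': '192.0.2.10'},
        wait=True)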
|
||||
"""
|
||||
|
||||
msg = ("Baremetal machine node failed to be created.")
|
||||
port_msg = ("Baremetal machine port failed to be created.")
|
||||
|
||||
url = '/nodes'
|
||||
# TODO(TheJulia): At some point we need to figure out how to
|
||||
# handle data across API versions when the requestor is defining newer items
|
||||
# with the older api.
|
||||
machine = self._baremetal_client.post(url,
|
||||
json=kwargs,
|
||||
error_message=msg,
|
||||
microversion="1.6")
|
||||
|
||||
created_nics = []
|
||||
try:
|
||||
for row in nics:
|
||||
payload = {'address': row['mac'],
|
||||
'node_uuid': machine['uuid']}
|
||||
nic = self._baremetal_client.post('/ports',
|
||||
json=payload,
|
||||
error_message=port_msg)
|
||||
created_nics.append(nic['uuid'])
|
||||
|
||||
except Exception as e:
|
||||
self.log.debug("ironic NIC registration failed", exc_info=True)
|
||||
# TODO(mordred) Handle failures here
|
||||
try:
|
||||
for uuid in created_nics:
|
||||
try:
|
||||
port_url = '/ports/{uuid}'.format(uuid=uuid)
|
||||
# NOTE(TheJulia): Added in hope that it is logged.
|
||||
port_msg = ('Failed to delete port {port} for node '
|
||||
'{node}').format(port=uuid,
|
||||
node=machine['uuid'])
|
||||
self._baremetal_client.delete(port_url,
|
||||
error_message=port_msg)
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
version = "1.6"
|
||||
msg = "Baremetal machine failed to be deleted."
|
||||
url = '/nodes/{node_id}'.format(
|
||||
node_id=machine['uuid'])
|
||||
self._baremetal_client.delete(url,
|
||||
error_message=msg,
|
||||
microversion=version)
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error registering NICs with the baremetal service: %s"
|
||||
% str(e))
|
||||
|
||||
with _utils.shade_exceptions(
|
||||
"Error transitioning node to available state"):
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for node transition to "
|
||||
"available state"):
|
||||
|
||||
machine = self.get_machine(machine['uuid'])
|
||||
|
||||
# Note(TheJulia): Per the Ironic state code, a node
|
||||
# that fails returns to enroll state, which means a failed
|
||||
# node cannot be determined at this point in time.
|
||||
if machine['provision_state'] in ['enroll']:
|
||||
self.node_set_provision_state(
|
||||
machine['uuid'], 'manage')
|
||||
elif machine['provision_state'] in ['manageable']:
|
||||
self.node_set_provision_state(
|
||||
machine['uuid'], 'provide')
|
||||
elif machine['last_error'] is not None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Machine encountered a failure: %s"
|
||||
% machine['last_error'])
|
||||
|
||||
# Note(TheJulia): Earlier versions of Ironic default to
|
||||
# None and later versions default to available up until
|
||||
# the introduction of enroll state.
|
||||
# Note(TheJulia): The node will transition through
|
||||
# cleaning if it is enabled, and we will wait for
|
||||
# completion.
|
||||
elif machine['provision_state'] in ['available', None]:
|
||||
break
|
||||
|
||||
else:
|
||||
if machine['provision_state'] in ['enroll']:
|
||||
self.node_set_provision_state(machine['uuid'], 'manage')
|
||||
# Note(TheJulia): We need to wait for the lock to clear
|
||||
# before we attempt to set the machine into provide state
|
||||
# which allows for the transition to available.
|
||||
for count in utils.iterate_timeout(
|
||||
lock_timeout,
|
||||
"Timeout waiting for reservation to clear "
|
||||
"before setting provide state"):
|
||||
machine = self.get_machine(machine['uuid'])
|
||||
if (machine['reservation'] is None
|
||||
and machine['provision_state'] != 'enroll'):
|
||||
# NOTE(TheJulia): In this case, the node has
|
||||
# moved on from the previous state and is
|
||||
# likely not being verified, as no lock is
|
||||
# present on the node.
|
||||
self.node_set_provision_state(
|
||||
machine['uuid'], 'provide')
|
||||
machine = self.get_machine(machine['uuid'])
|
||||
break
|
||||
|
||||
elif machine['provision_state'] in [
|
||||
'cleaning',
|
||||
'available']:
|
||||
break
|
||||
|
||||
elif machine['last_error'] is not None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Machine encountered a failure: %s"
|
||||
% machine['last_error'])
|
||||
if not isinstance(machine, str):
|
||||
return self._normalize_machine(machine)
|
||||
else:
|
||||
return machine
|
||||
|
||||
def unregister_machine(self, nics, uuid, wait=False, timeout=600):
|
||||
"""Unregister Baremetal from Ironic
|
||||
|
||||
Removes entries for Network Interfaces and baremetal nodes
|
||||
from an Ironic API
|
||||
|
||||
:param nics: An array of dicts (each with a 'mac' key) holding the
MAC addresses of the ports to be removed.
|
||||
:param string uuid: The UUID of the node to be deleted.
|
||||
|
||||
:param wait: Boolean value, defaults to false, if to block the method
|
||||
upon the final step of unregistering the machine.
|
||||
|
||||
:param timeout: Integer value, representing seconds with a default
|
||||
value of 600, which controls the maximum amount of
|
||||
time to block the method's completion on.
|
||||
|
||||
:raises: OpenStackCloudException on operation failure.
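
Example (a minimal usage sketch; the UUID and MAC shown are
illustrative assumptions)::

    nics = [{'mac': 'aa:bb:cc:dd:ee:01'}]  # hypothetical port MAC
    cloud.unregister_machine(
        nics, '00000000-0000-0000-0000-000000000000', wait=True)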
|
||||
"""
|
||||
|
||||
machine = self.get_machine(uuid)
|
||||
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
|
||||
if machine['provision_state'] in invalid_states:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error unregistering node '%s' due to current provision "
|
||||
"state '%s'" % (uuid, machine['provision_state']))
|
||||
|
||||
# NOTE(TheJulia) There is a high possibility of a lock being present
|
||||
# if the machine was just moved through the state machine. This was
|
||||
# previously concealed by exception retry logic that detected the
|
||||
# failure, and resubmitted the request in python-ironicclient.
|
||||
try:
|
||||
self.wait_for_baremetal_node_lock(machine, timeout=timeout)
|
||||
except exc.OpenStackCloudException as e:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error unregistering node '%s': Exception occured while"
|
||||
" waiting to be able to proceed: %s" % (machine['uuid'], e))
|
||||
|
||||
for nic in nics:
|
||||
port_msg = ("Error removing NIC {nic} from baremetal API for "
|
||||
"node {uuid}").format(nic=nic, uuid=uuid)
|
||||
port_url = '/ports/detail?address={mac}'.format(mac=nic['mac'])
|
||||
port = self._baremetal_client.get(port_url, microversion=1.6,
|
||||
error_message=port_msg)
|
||||
port_url = '/ports/{uuid}'.format(uuid=port['ports'][0]['uuid'])
|
||||
_utils._call_client_and_retry(self._baremetal_client.delete,
|
||||
port_url, retry_on=[409, 503],
|
||||
error_message=port_msg)
|
||||
|
||||
with _utils.shade_exceptions(
|
||||
"Error unregistering machine {node_id} from the baremetal "
|
||||
"API".format(node_id=uuid)):
|
||||
|
||||
# NOTE(TheJulia): While this should not matter microversion wise,
|
||||
# ironic assumes all calls without an explicit microversion to be
|
||||
# version 1.0. Ironic expects to deprecate support for older
|
||||
# microversions in future releases, as such, we explicitly set
|
||||
# the version to what we have been using with the client library.
|
||||
version = "1.6"
|
||||
msg = "Baremetal machine failed to be deleted"
|
||||
url = '/nodes/{node_id}'.format(
|
||||
node_id=uuid)
|
||||
_utils._call_client_and_retry(self._baremetal_client.delete,
|
||||
url, retry_on=[409, 503],
|
||||
error_message=msg,
|
||||
microversion=version)
|
||||
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for machine to be deleted"):
|
||||
if not self.get_machine(uuid):
|
||||
break
|
||||
|
||||
def patch_machine(self, name_or_id, patch):
|
||||
"""Patch Machine Information
|
||||
|
||||
This method allows for an interface to manipulate node entries
|
||||
within Ironic.
|
||||
|
||||
:param string name_or_id: A machine name or UUID to be updated.
|
||||
:param patch:
|
||||
The JSON Patch document is a list of dictionary objects
|
||||
that comply with RFC 6902 which can be found at
|
||||
https://tools.ietf.org/html/rfc6902.
|
||||
|
||||
Example patch construction::
|
||||
|
||||
patch=[]
|
||||
patch.append({
|
||||
'op': 'remove',
|
||||
'path': '/instance_info'
|
||||
})
|
||||
patch.append({
|
||||
'op': 'replace',
|
||||
'path': '/name',
|
||||
'value': 'newname'
|
||||
})
|
||||
patch.append({
|
||||
'op': 'add',
|
||||
'path': '/driver_info/username',
|
||||
'value': 'administrator'
|
||||
})
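
Example call (a sketch; the node name is an illustrative assumption)::

    # apply the patch built above to a hypothetical node 'compute-0'
    node = cloud.patch_machine('compute-0', patch)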
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: ``munch.Munch`` representing the newly updated node.
|
||||
"""
|
||||
node = self.baremetal.get_node(name_or_id)
|
||||
microversion = node._get_microversion_for(self._baremetal_client,
|
||||
'commit')
|
||||
msg = ("Error updating machine via patch operation on node "
|
||||
"{node}".format(node=name_or_id))
|
||||
url = '/nodes/{node_id}'.format(node_id=node.id)
|
||||
return self._normalize_machine(
|
||||
self._baremetal_client.patch(url,
|
||||
json=patch,
|
||||
microversion=microversion,
|
||||
error_message=msg))
|
||||
|
||||
def update_machine(self, name_or_id, **attrs):
|
||||
"""Update a machine with new configuration information
|
||||
|
||||
A user-friendly method to perform updates of a machine, in whole or
|
||||
part.
|
||||
|
||||
:param string name_or_id: A machine name or UUID to be updated.
|
||||
:param attrs: Attributes to be updated on the machine.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: ``munch.Munch`` containing a machine sub-dictionary consisting
|
||||
of the updated data returned from the API update operation,
|
||||
and a list named changes which contains all of the API paths
|
||||
that received updates.
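
Example (a sketch; the node name and new name are illustrative
assumptions)::

    result = cloud.update_machine('compute-0', name='compute-0-new')
    # result['changes'] lists the updated API paths, e.g. ['/name']
    print(result['changes'])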
|
||||
"""
|
||||
machine = self.get_machine(name_or_id)
|
||||
if not machine:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Machine update failed to find Machine: %s. " % name_or_id)
|
||||
|
||||
new_config = dict(machine, **attrs)
|
||||
|
||||
try:
|
||||
patch = jsonpatch.JsonPatch.from_diff(machine, new_config)
|
||||
except Exception as e:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Machine update failed - Error generating JSON patch object "
|
||||
"for submission to the API. Machine: %s Error: %s"
|
||||
% (name_or_id, e))
|
||||
|
||||
if not patch:
|
||||
return dict(
|
||||
node=machine,
|
||||
changes=None
|
||||
)
|
||||
|
||||
change_list = [change['path'] for change in patch]
|
||||
node = self.baremetal.update_node(machine, **attrs)
|
||||
return dict(
|
||||
node=self._normalize_machine(node._to_munch()),
|
||||
changes=change_list
|
||||
)
|
||||
|
||||
def attach_port_to_machine(self, name_or_id, port_name_or_id):
|
||||
"""Attach a virtual port to the bare metal machine.
|
||||
|
||||
:param string name_or_id: A machine name or UUID.
|
||||
:param string port_name_or_id: A port name or UUID.
|
||||
Note that this is a Network service port, not a bare metal NIC.
|
||||
:return: Nothing.
|
||||
"""
|
||||
machine = self.get_machine(name_or_id)
|
||||
port = self.get_port(port_name_or_id)
|
||||
self.baremetal.attach_vif_to_node(machine, port['id'])
|
||||
|
||||
def detach_port_from_machine(self, name_or_id, port_name_or_id):
|
||||
"""Detach a virtual port from the bare metal machine.
|
||||
|
||||
:param string name_or_id: A machine name or UUID.
|
||||
:param string port_name_or_id: A port name or UUID.
|
||||
Note that this is a Network service port, not a bare metal NIC.
|
||||
:return: Nothing.
|
||||
"""
|
||||
machine = self.get_machine(name_or_id)
|
||||
port = self.get_port(port_name_or_id)
|
||||
self.baremetal.detach_vif_from_node(machine, port['id'])
|
||||
|
||||
def list_ports_attached_to_machine(self, name_or_id):
|
||||
"""List virtual ports attached to the bare metal machine.
|
||||
|
||||
:param string name_or_id: A machine name or UUID.
|
||||
:returns: List of ``munch.Munch`` representing the ports.
|
||||
"""
|
||||
machine = self.get_machine(name_or_id)
|
||||
vif_ids = self.baremetal.list_node_vifs(machine)
|
||||
return [self.get_port(vif) for vif in vif_ids]
|
||||
|
||||
def validate_machine(self, name_or_id, for_deploy=True):
|
||||
"""Validate parameters of the machine.
|
||||
|
||||
:param string name_or_id: The Name or UUID value representing the
|
||||
baremetal node.
|
||||
:param bool for_deploy: If ``True``, validate readiness for deployment,
|
||||
otherwise validate only the power management
|
||||
properties.
|
||||
:raises: :exc:`~openstack.exceptions.ValidationException`
|
||||
"""
|
||||
if for_deploy:
|
||||
ifaces = ('boot', 'deploy', 'management', 'power')
|
||||
else:
|
||||
ifaces = ('power',)
|
||||
self.baremetal.validate_node(name_or_id, required=ifaces)
|
||||
|
||||
def validate_node(self, uuid):
|
||||
warnings.warn('validate_node is deprecated, please use '
|
||||
'validate_machine instead', DeprecationWarning)
|
||||
self.baremetal.validate_node(uuid)
|
||||
|
||||
def node_set_provision_state(self,
|
||||
name_or_id,
|
||||
state,
|
||||
configdrive=None,
|
||||
wait=False,
|
||||
timeout=3600):
|
||||
"""Set Node Provision State
|
||||
|
||||
Enables a user to provision a Machine and optionally define a
|
||||
config drive to be utilized.
|
||||
|
||||
:param string name_or_id: The Name or UUID value representing the
|
||||
baremetal node.
|
||||
:param string state: The desired provision state for the
|
||||
baremetal node.
|
||||
:param string configdrive: An optional URL or file or path
|
||||
representing the configdrive. In the
|
||||
case of a directory, the client API
|
||||
will create a properly formatted
|
||||
configuration drive file and post the
|
||||
file contents to the API for
|
||||
deployment.
|
||||
:param boolean wait: A boolean value, defaulted to false, to control
|
||||
if the method will wait for the desired end state
to be reached before returning.
:param integer timeout: Integer value, defaulting to 3600 seconds,
representing the amount of time to wait for
the desired end state to be reached.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: ``munch.Munch`` representing the current state of the machine
|
||||
upon exit of the method.
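
Example (a sketch; the node name and configdrive path are
illustrative assumptions)::

    # deploy a node using a locally built config drive directory
    node = cloud.node_set_provision_state(
        'compute-0', 'active', configdrive='/tmp/configdrive',
        wait=True, timeout=1800)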
|
||||
"""
|
||||
node = self.baremetal.set_node_provision_state(
|
||||
name_or_id, target=state, config_drive=configdrive,
|
||||
wait=wait, timeout=timeout)
|
||||
return node._to_munch()
|
||||
|
||||
def set_machine_maintenance_state(
|
||||
self,
|
||||
name_or_id,
|
||||
state=True,
|
||||
reason=None):
|
||||
"""Set Baremetal Machine Maintenance State
|
||||
|
||||
Sets Baremetal maintenance state and maintenance reason.
|
||||
|
||||
:param string name_or_id: The Name or UUID value representing the
|
||||
baremetal node.
|
||||
:param boolean state: The desired state of the node. True being in
|
||||
maintenance, whereas False means the machine
|
||||
is not in maintenance mode. This value
|
||||
defaults to True if not explicitly set.
|
||||
:param string reason: An optional freeform string that is supplied to
|
||||
the baremetal API to allow for notation as to why
|
||||
the node is in maintenance state.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: None
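
Example (a sketch; the node name and reason are illustrative
assumptions)::

    cloud.set_machine_maintenance_state(
        'compute-0', True, reason='replacing power supply')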
|
||||
"""
|
||||
if state:
|
||||
self.baremetal.set_node_maintenance(name_or_id, reason)
|
||||
else:
|
||||
self.baremetal.unset_node_maintenance(name_or_id)
|
||||
|
||||
def remove_machine_from_maintenance(self, name_or_id):
|
||||
"""Remove Baremetal Machine from Maintenance State
|
||||
|
||||
Similarly to set_machine_maintenance_state, this method
|
||||
removes a machine from maintenance state. It must be noted
|
||||
that this method simply calls set_machine_maintenance_state
|
||||
for the name_or_id requested and sets the state to False.
|
||||
|
||||
:param string name_or_id: The Name or UUID value representing the
|
||||
baremetal node.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: None
|
||||
"""
|
||||
self.baremetal.unset_node_maintenance(name_or_id)
|
||||
|
||||
def set_machine_power_on(self, name_or_id):
|
||||
"""Activate baremetal machine power
|
||||
|
||||
This is a method that sets the node power state to "on".
|
||||
|
||||
:param string name_or_id: A string representing the baremetal
|
||||
node to have power turned to an "on"
|
||||
state.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: None
|
||||
"""
|
||||
self.baremetal.set_node_power_state(name_or_id, 'power on')
|
||||
|
||||
def set_machine_power_off(self, name_or_id):
|
||||
"""De-activate baremetal machine power
|
||||
|
||||
This is a method that sets the node power state to "off".
|
||||
|
||||
:param string name_or_id: A string representing the baremetal
|
||||
node to have power turned to an "off"
|
||||
state.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: None
|
||||
"""
|
||||
self.baremetal.set_node_power_state(name_or_id, 'power off')
|
||||
|
||||
def set_machine_power_reboot(self, name_or_id):
|
||||
"""De-activate baremetal machine power
|
||||
|
||||
This is a method that sets the node power state to "reboot", which
|
||||
in essence changes the machine power state to "off", and that back
|
||||
to "on".
|
||||
|
||||
:param string name_or_id: A string representing the baremetal
node to be rebooted.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
:returns: None
|
||||
"""
|
||||
self.baremetal.set_node_power_state(name_or_id, 'rebooting')
|
||||
|
||||
def activate_node(self, uuid, configdrive=None,
|
||||
wait=False, timeout=1200):
|
||||
self.node_set_provision_state(
|
||||
uuid, 'active', configdrive, wait=wait, timeout=timeout)
|
||||
|
||||
def deactivate_node(self, uuid, wait=False,
|
||||
timeout=1200):
|
||||
self.node_set_provision_state(
|
||||
uuid, 'deleted', wait=wait, timeout=timeout)
|
||||
|
||||
def set_node_instance_info(self, uuid, patch):
|
||||
msg = ("Error updating machine via patch operation on node "
|
||||
"{node}".format(node=uuid))
|
||||
url = '/nodes/{node_id}'.format(node_id=uuid)
|
||||
return self._baremetal_client.patch(url,
|
||||
json=patch,
|
||||
error_message=msg)
|
||||
|
||||
def purge_node_instance_info(self, uuid):
|
||||
patch = []
|
||||
patch.append({'op': 'remove', 'path': '/instance_info'})
|
||||
msg = ("Error updating machine via patch operation on node "
|
||||
"{node}".format(node=uuid))
|
||||
url = '/nodes/{node_id}'.format(node_id=uuid)
|
||||
return self._baremetal_client.patch(url,
|
||||
json=patch,
|
||||
error_message=msg)
|
||||
|
||||
def wait_for_baremetal_node_lock(self, node, timeout=30):
|
||||
"""Wait for a baremetal node to have no lock.
|
||||
|
||||
DEPRECATED, use ``wait_for_node_reservation`` on the `baremetal` proxy.
|
||||
|
||||
:raises: OpenStackCloudException upon client failure.
|
||||
:returns: None
|
||||
"""
|
||||
warnings.warn("The wait_for_baremetal_node_lock call is deprecated "
|
||||
"in favor of wait_for_node_reservation on the baremetal "
|
||||
"proxy", DeprecationWarning)
|
||||
self.baremetal.wait_for_node_reservation(node, timeout)
|
|
@@ -0,0 +1,870 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import types  # noqa
import warnings

from openstack.cloud import exc
from openstack.cloud import _normalize
from openstack.cloud import _utils
from openstack import proxy
from openstack import utils


def _no_pending_volumes(volumes):
|
||||
"""If there are any volumes not in a steady state, don't cache"""
|
||||
for volume in volumes:
|
||||
if volume['status'] not in ('available', 'error', 'in-use'):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class BlockStorageCloudMixin(_normalize.Normalizer):
|
||||
|
||||
@property
|
||||
def _volume_client(self):
|
||||
if 'block-storage' not in self._raw_clients:
|
||||
client = self._get_raw_client('block-storage')
|
||||
self._raw_clients['block-storage'] = client
|
||||
return self._raw_clients['block-storage']
|
||||
|
||||
@_utils.cache_on_arguments(should_cache_fn=_no_pending_volumes)
|
||||
def list_volumes(self, cache=True):
|
||||
"""List all available volumes.
|
||||
|
||||
:returns: A list of volume ``munch.Munch``.
|
||||
|
||||
"""
|
||||
def _list(data):
|
||||
volumes.extend(data.get('volumes', []))
|
||||
endpoint = None
|
||||
for l in data.get('volumes_links', []):
|
||||
if 'rel' in l and 'next' == l['rel']:
|
||||
endpoint = l['href']
|
||||
break
|
||||
if endpoint:
|
||||
try:
|
||||
_list(self._volume_client.get(endpoint))
|
||||
except exc.OpenStackCloudURINotFound:
|
||||
# Catch and re-raise here because we are making recursive
|
||||
# calls and we just have context for the log here
|
||||
self.log.debug(
|
||||
"While listing volumes, could not find next link"
|
||||
" {link}.".format(link=data))
|
||||
raise
|
||||
|
||||
if not cache:
|
||||
warnings.warn('cache argument to list_volumes is deprecated. Use '
|
||||
'invalidate instead.')
|
||||
|
||||
# Fetching paginated volumes can fail for several reasons; if
|
||||
# something goes wrong we'll have to start fetching volumes from
|
||||
# scratch
|
||||
attempts = 5
|
||||
for _ in range(attempts):
|
||||
volumes = []
|
||||
data = self._volume_client.get('/volumes/detail')
|
||||
if 'volumes_links' not in data:
|
||||
# no pagination needed
|
||||
volumes.extend(data.get('volumes', []))
|
||||
break
|
||||
|
||||
try:
|
||||
_list(data)
|
||||
break
|
||||
except exc.OpenStackCloudURINotFound:
|
||||
pass
|
||||
else:
|
||||
self.log.debug(
|
||||
"List volumes failed to retrieve all volumes after"
|
||||
" {attempts} attempts. Returning what we found.".format(
|
||||
attempts=attempts))
|
||||
# list volumes didn't complete successfully so just return what
|
||||
# we found
|
||||
return self._normalize_volumes(
|
||||
self._get_and_munchify(key=None, data=volumes))
|
||||
|
||||
@_utils.cache_on_arguments()
|
||||
def list_volume_types(self, get_extra=True):
|
||||
"""List all available volume types.
|
||||
|
||||
:returns: A list of volume ``munch.Munch``.
|
||||
|
||||
"""
|
||||
data = self._volume_client.get(
|
||||
'/types',
|
||||
params=dict(is_public='None'),
|
||||
error_message='Error fetching volume_type list')
|
||||
return self._normalize_volume_types(
|
||||
self._get_and_munchify('volume_types', data))
|
||||
|
||||
def get_volume(self, name_or_id, filters=None):
|
||||
"""Get a volume by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the volume.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering. Elements
|
||||
of this dictionary may, themselves, be dictionaries. Example::
|
||||
|
||||
{
|
||||
'last_name': 'Smith',
|
||||
'other': {
|
||||
'gender': 'Female'
|
||||
}
|
||||
}
|
||||
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: A volume ``munch.Munch`` or None if no matching volume is
|
||||
found.
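
Example (a sketch; the volume name and filter values are
illustrative assumptions)::

    volume = cloud.get_volume('data-volume',
                              filters={'status': 'available'})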
|
||||
|
||||
"""
|
||||
return _utils._get_entity(self, 'volume', name_or_id, filters)
|
||||
|
||||
def get_volume_by_id(self, id):
|
||||
""" Get a volume by ID
|
||||
|
||||
:param id: ID of the volume.
|
||||
:returns: A volume ``munch.Munch``.
|
||||
"""
|
||||
data = self._volume_client.get(
|
||||
'/volumes/{id}'.format(id=id),
|
||||
error_message="Error getting volume with ID {id}".format(id=id)
|
||||
)
|
||||
volume = self._normalize_volume(
|
||||
self._get_and_munchify('volume', data))
|
||||
|
||||
return volume
|
||||
|
||||
def get_volume_type(self, name_or_id, filters=None):
|
||||
"""Get a volume type by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the volume.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering. Elements
|
||||
of this dictionary may, themselves, be dictionaries. Example::
|
||||
|
||||
{
|
||||
'last_name': 'Smith',
|
||||
'other': {
|
||||
'gender': 'Female'
|
||||
}
|
||||
}
|
||||
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: A volume ``munch.Munch`` or None if no matching volume is
|
||||
found.
|
||||
|
||||
"""
|
||||
return _utils._get_entity(
|
||||
self, 'volume_type', name_or_id, filters)
|
||||
|
||||
def create_volume(
|
||||
self, size,
|
||||
wait=True, timeout=None, image=None, bootable=None, **kwargs):
|
||||
"""Create a volume.
|
||||
|
||||
:param size: Size, in GB of the volume to create.
|
||||
:param name: (optional) Name for the volume.
|
||||
:param description: (optional) Description for the volume.
|
||||
:param wait: If true, waits for volume to be created.
|
||||
:param timeout: Seconds to wait for volume creation. None is forever.
|
||||
:param image: (optional) Image name, ID or object from which to create
|
||||
the volume
|
||||
:param bootable: (optional) Make this volume bootable. If set, wait
|
||||
will also be set to true.
|
||||
:param kwargs: Keyword arguments as expected for cinder client.
|
||||
|
||||
:returns: The created volume object.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
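
Example (a sketch; the size, name and image are illustrative
assumptions)::

    # creates a 10 GB bootable volume from a hypothetical image
    volume = cloud.create_volume(
        10, name='boot-volume', image='cirros',
        bootable=True, wait=True)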
|
||||
"""
|
||||
if bootable is not None:
|
||||
wait = True
|
||||
|
||||
if image:
|
||||
image_obj = self.get_image(image)
|
||||
if not image_obj:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Image {image} was requested as the basis for a new"
|
||||
" volume, but was not found on the cloud".format(
|
||||
image=image))
|
||||
kwargs['imageRef'] = image_obj['id']
|
||||
kwargs = self._get_volume_kwargs(kwargs)
|
||||
kwargs['size'] = size
|
||||
payload = dict(volume=kwargs)
|
||||
if 'scheduler_hints' in kwargs:
|
||||
payload['OS-SCH-HNT:scheduler_hints'] = kwargs.pop(
|
||||
'scheduler_hints', None)
|
||||
data = self._volume_client.post(
|
||||
'/volumes',
|
||||
json=dict(payload),
|
||||
error_message='Error in creating volume')
|
||||
volume = self._get_and_munchify('volume', data)
|
||||
self.list_volumes.invalidate(self)
|
||||
|
||||
if volume['status'] == 'error':
|
||||
raise exc.OpenStackCloudException("Error in creating volume")
|
||||
|
||||
if wait:
|
||||
vol_id = volume['id']
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the volume to be available."):
|
||||
volume = self.get_volume(vol_id)
|
||||
|
||||
if not volume:
|
||||
continue
|
||||
|
||||
if volume['status'] == 'available':
|
||||
if bootable is not None:
|
||||
self.set_volume_bootable(volume, bootable=bootable)
|
||||
# no need to re-fetch to update the flag, just set it.
|
||||
volume['bootable'] = bootable
|
||||
return volume
|
||||
|
||||
if volume['status'] == 'error':
|
||||
raise exc.OpenStackCloudException("Error creating volume")
|
||||
|
||||
return self._normalize_volume(volume)
|
||||
|
||||
def update_volume(self, name_or_id, **kwargs):
|
||||
kwargs = self._get_volume_kwargs(kwargs)
|
||||
|
||||
volume = self.get_volume(name_or_id)
|
||||
if not volume:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Volume %s not found." % name_or_id)
|
||||
|
||||
data = self._volume_client.put(
|
||||
'/volumes/{volume_id}'.format(volume_id=volume.id),
|
||||
json=dict({'volume': kwargs}),
|
||||
error_message='Error updating volume')
|
||||
|
||||
self.list_volumes.invalidate(self)
|
||||
|
||||
return self._normalize_volume(self._get_and_munchify('volume', data))
|
||||
|
||||
def set_volume_bootable(self, name_or_id, bootable=True):
|
||||
"""Set a volume's bootable flag.
|
||||
|
||||
:param name_or_id: Name, unique ID of the volume or a volume dict.
|
||||
:param bool bootable: Whether the volume should be bootable.
|
||||
(Defaults to True)
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
volume = self.get_volume(name_or_id)
|
||||
|
||||
if not volume:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Volume {name_or_id} does not exist".format(
|
||||
name_or_id=name_or_id))
|
||||
|
||||
self._volume_client.post(
|
||||
'volumes/{id}/action'.format(id=volume['id']),
|
||||
json={'os-set_bootable': {'bootable': bootable}},
|
||||
error_message="Error setting bootable on volume {volume}".format(
|
||||
volume=volume['id'])
|
||||
)
|
||||
|
||||
def delete_volume(self, name_or_id=None, wait=True, timeout=None,
|
||||
force=False):
|
||||
"""Delete a volume.
|
||||
|
||||
:param name_or_id: Name or unique ID of the volume.
|
||||
:param wait: If true, waits for volume to be deleted.
|
||||
:param timeout: Seconds to wait for volume deletion. None is forever.
|
||||
:param force: Force delete volume even if the volume is in deleting
|
||||
or error_deleting state.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
self.list_volumes.invalidate(self)
|
||||
volume = self.get_volume(name_or_id)
|
||||
|
||||
if not volume:
|
||||
self.log.debug(
|
||||
"Volume %(name_or_id)s does not exist",
|
||||
{'name_or_id': name_or_id},
|
||||
exc_info=True)
|
||||
return False
|
||||
|
||||
with _utils.shade_exceptions("Error in deleting volume"):
|
||||
try:
|
||||
if force:
|
||||
self._volume_client.post(
|
||||
'volumes/{id}/action'.format(id=volume['id']),
|
||||
json={'os-force_delete': None})
|
||||
else:
|
||||
self._volume_client.delete(
|
||||
'volumes/{id}'.format(id=volume['id']))
|
||||
except exc.OpenStackCloudURINotFound:
|
||||
self.log.debug(
|
||||
"Volume {id} not found when deleting. Ignoring.".format(
|
||||
id=volume['id']))
|
||||
return False
|
||||
|
||||
self.list_volumes.invalidate(self)
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the volume to be deleted."):
|
||||
|
||||
if not self.get_volume(volume['id']):
|
||||
break
|
||||
|
||||
return True
|
||||
|
||||
def get_volumes(self, server, cache=True):
|
||||
volumes = []
|
||||
for volume in self.list_volumes(cache=cache):
|
||||
for attach in volume['attachments']:
|
||||
if attach['server_id'] == server['id']:
|
||||
volumes.append(volume)
|
||||
return volumes
|
||||
|
||||
def get_volume_limits(self, name_or_id=None):
|
||||
""" Get volume limits for a project
|
||||
|
||||
:param name_or_id: (optional) project name or ID to get limits for
|
||||
if different from the current project
|
||||
:raises: OpenStackCloudException if it's not a valid project
|
||||
|
||||
:returns: Munch object with the limits
|
||||
"""
|
||||
params = {}
|
||||
project_id = None
|
||||
error_msg = "Failed to get limits"
|
||||
if name_or_id:
|
||||
|
||||
proj = self.get_project(name_or_id)
|
||||
if not proj:
|
||||
raise exc.OpenStackCloudException("project does not exist")
|
||||
project_id = proj.id
|
||||
params['tenant_id'] = project_id
|
||||
error_msg = "{msg} for the project: {project} ".format(
|
||||
msg=error_msg, project=name_or_id)
|
||||
|
||||
data = self._volume_client.get('/limits', params=params)
|
||||
limits = self._get_and_munchify('limits', data)
|
||||
return limits
|
||||
|
||||
def get_volume_id(self, name_or_id):
|
||||
volume = self.get_volume(name_or_id)
|
||||
if volume:
|
||||
return volume['id']
|
||||
return None
|
||||
|
||||
def volume_exists(self, name_or_id):
|
||||
return self.get_volume(name_or_id) is not None
|
||||
|
||||
def get_volume_attach_device(self, volume, server_id):
|
||||
"""Return the device name a volume is attached to for a server.
|
||||
|
||||
This can also be used to verify if a volume is attached to
|
||||
a particular server.
|
||||
|
||||
:param volume: Volume dict
|
||||
:param server_id: ID of server to check
|
||||
|
||||
:returns: Device name if attached, None if volume is not attached.
|
||||
"""
|
||||
for attach in volume['attachments']:
|
||||
if server_id == attach['server_id']:
|
||||
return attach['device']
|
||||
return None
|
||||
|
||||
def detach_volume(self, server, volume, wait=True, timeout=None):
|
||||
"""Detach a volume from a server.
|
||||
|
||||
:param server: The server dict to detach from.
|
||||
:param volume: The volume dict to detach.
|
||||
:param wait: If true, waits for volume to be detached.
|
||||
:param timeout: Seconds to wait for volume detachment. None is forever.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
proxy._json_response(self.compute.delete(
|
||||
'/servers/{server_id}/os-volume_attachments/{volume_id}'.format(
|
||||
server_id=server['id'], volume_id=volume['id'])),
|
||||
error_message=(
|
||||
"Error detaching volume {volume} from server {server}".format(
|
||||
volume=volume['id'], server=server['id'])))
|
||||
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for volume %s to detach." % volume['id']):
|
||||
try:
|
||||
vol = self.get_volume(volume['id'])
|
||||
except Exception:
|
||||
self.log.debug(
|
||||
"Error getting volume info %s", volume['id'],
|
||||
exc_info=True)
|
||||
continue
|
||||
|
||||
if vol['status'] == 'available':
|
||||
return
|
||||
|
||||
if vol['status'] == 'error':
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error in detaching volume %s" % volume['id']
|
||||
)
|
||||
|
||||
def attach_volume(self, server, volume, device=None,
|
||||
wait=True, timeout=None):
|
||||
"""Attach a volume to a server.
|
||||
|
||||
This will attach a volume, described by the passed in volume
|
||||
dict (as returned by get_volume()), to the server described by
|
||||
the passed in server dict (as returned by get_server()) on the
|
||||
named device on the server.
|
||||
|
||||
If the volume is already attached to the server, or generally not
|
||||
available, then an exception is raised. To re-attach to a server,
|
||||
but under a different device, the user must detach it first.
|
||||
|
||||
:param server: The server dict to attach to.
|
||||
:param volume: The volume dict to attach.
|
||||
:param device: The device name where the volume will attach.
|
||||
:param wait: If true, waits for volume to be attached.
|
||||
:param timeout: Seconds to wait for volume attachment. None is forever.
|
||||
|
||||
:returns: a volume attachment object.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
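
Example (a sketch; the server and volume names are illustrative
assumptions)::

    server = cloud.get_server('web-0')
    volume = cloud.get_volume('data-volume')
    attachment = cloud.attach_volume(server, volume, wait=True)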
|
||||
"""
|
||||
dev = self.get_volume_attach_device(volume, server['id'])
|
||||
if dev:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Volume %s already attached to server %s on device %s"
|
||||
% (volume['id'], server['id'], dev)
|
||||
)
|
||||
|
||||
if volume['status'] != 'available':
|
||||
raise exc.OpenStackCloudException(
|
||||
"Volume %s is not available. Status is '%s'"
|
||||
% (volume['id'], volume['status'])
|
||||
)
|
||||
|
||||
payload = {'volumeId': volume['id']}
|
||||
if device:
|
||||
payload['device'] = device
|
||||
data = proxy._json_response(
|
||||
self.compute.post(
|
||||
'/servers/{server_id}/os-volume_attachments'.format(
|
||||
server_id=server['id']),
|
||||
json=dict(volumeAttachment=payload)),
|
||||
error_message="Error attaching volume {volume_id} to server "
|
||||
"{server_id}".format(volume_id=volume['id'],
|
||||
server_id=server['id']))
|
||||
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for volume %s to attach." % volume['id']):
|
||||
try:
|
||||
self.list_volumes.invalidate(self)
|
||||
vol = self.get_volume(volume['id'])
|
||||
except Exception:
|
||||
self.log.debug(
|
||||
"Error getting volume info %s", volume['id'],
|
||||
exc_info=True)
|
||||
continue
|
||||
|
||||
if self.get_volume_attach_device(vol, server['id']):
|
||||
break
|
||||
|
||||
# TODO(Shrews) check to see if a volume can be in error status
|
||||
# and also attached. If so, we should move this
|
||||
# above the get_volume_attach_device call
|
||||
if vol['status'] == 'error':
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error in attaching volume %s" % volume['id']
|
||||
)
|
||||
return self._normalize_volume_attachment(
|
||||
self._get_and_munchify('volumeAttachment', data))
|
||||
|
||||
def _get_volume_kwargs(self, kwargs):
|
||||
name = kwargs.pop('name', kwargs.pop('display_name', None))
|
||||
description = kwargs.pop('description',
|
||||
kwargs.pop('display_description', None))
|
||||
if name:
|
||||
if self._is_client_version('volume', 2):
|
||||
kwargs['name'] = name
|
||||
else:
|
||||
kwargs['display_name'] = name
|
||||
if description:
|
||||
if self._is_client_version('volume', 2):
|
||||
kwargs['description'] = description
|
||||
else:
|
||||
kwargs['display_description'] = description
|
||||
return kwargs
|
||||
|
||||
@_utils.valid_kwargs('name', 'display_name',
|
||||
'description', 'display_description')
|
||||
def create_volume_snapshot(self, volume_id, force=False,
|
||||
wait=True, timeout=None, **kwargs):
|
||||
"""Create a volume.
|
||||
|
||||
:param volume_id: the ID of the volume to snapshot.
|
||||
:param force: If set to True the snapshot will be created even if the
|
||||
volume is attached to an instance, if False it will not
|
||||
:param name: name of the snapshot, one will be generated if one is
|
||||
not provided
|
||||
:param description: description of the snapshot, one will be generated
|
||||
if one is not provided
|
||||
:param wait: If true, waits for volume snapshot to be created.
|
||||
:param timeout: Seconds to wait for volume snapshot creation. None is
|
||||
forever.
|
||||
|
||||
:returns: The created volume snapshot object.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
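
Example (a sketch; the volume ID and snapshot name are illustrative
assumptions)::

    snapshot = cloud.create_volume_snapshot(
        volume_id='00000000-0000-0000-0000-000000000000',
        name='nightly-snap', wait=True)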
|
||||
"""
|
||||
|
||||
kwargs = self._get_volume_kwargs(kwargs)
|
||||
payload = {'volume_id': volume_id, 'force': force}
|
||||
payload.update(kwargs)
|
||||
data = self._volume_client.post(
|
||||
'/snapshots',
|
||||
json=dict(snapshot=payload),
|
||||
error_message="Error creating snapshot of volume "
|
||||
"{volume_id}".format(volume_id=volume_id))
|
||||
snapshot = self._get_and_munchify('snapshot', data)
|
||||
if wait:
|
||||
snapshot_id = snapshot['id']
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the volume snapshot to be available."
|
||||
):
|
||||
snapshot = self.get_volume_snapshot_by_id(snapshot_id)
|
||||
|
||||
if snapshot['status'] == 'available':
|
||||
break
|
||||
|
||||
if snapshot['status'] == 'error':
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error in creating volume snapshot")
|
||||
|
||||
# TODO(mordred) need to normalize snapshots. We were normalizing them
|
||||
# as volumes, which is an error. They need to be normalized as
|
||||
# volume snapshots, which are completely different objects
|
||||
return snapshot
|
||||
|
||||
def get_volume_snapshot_by_id(self, snapshot_id):
|
||||
"""Takes a snapshot_id and gets a dict of the snapshot
|
||||
that matches that ID.
|
||||
|
||||
Note: This is more efficient than get_volume_snapshot.
|
||||
|
||||
:param snapshot_id: ID of the volume snapshot.
|
||||
|
||||
"""
|
||||
data = self._volume_client.get(
|
||||
'/snapshots/{snapshot_id}'.format(snapshot_id=snapshot_id),
|
||||
error_message="Error getting snapshot "
|
||||
"{snapshot_id}".format(snapshot_id=snapshot_id))
|
||||
return self._normalize_volume(
|
||||
self._get_and_munchify('snapshot', data))
|
||||
|
||||
def get_volume_snapshot(self, name_or_id, filters=None):
|
||||
"""Get a volume by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the volume snapshot.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering. Elements
|
||||
of this dictionary may, themselves, be dictionaries. Example::
|
||||
|
||||
{
|
||||
'last_name': 'Smith',
|
||||
'other': {
|
||||
'gender': 'Female'
|
||||
}
|
||||
}
|
||||
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: A volume snapshot ``munch.Munch`` or None if no matching
volume snapshot is found.
|
||||
"""
|
||||
return _utils._get_entity(self, 'volume_snapshot', name_or_id,
|
||||
filters)
|
||||
|
||||
def create_volume_backup(self, volume_id, name=None, description=None,
|
||||
force=False, wait=True, timeout=None):
|
||||
"""Create a volume backup.
|
||||
|
||||
:param volume_id: the ID of the volume to backup.
|
||||
:param name: name of the backup, one will be generated if one is
|
||||
not provided
|
||||
:param description: description of the backup, one will be generated
|
||||
if one is not provided
|
||||
:param force: If set to True the backup will be created even if the
|
||||
volume is attached to an instance, if False it will not
|
||||
:param wait: If true, waits for volume backup to be created.
|
||||
:param timeout: Seconds to wait for volume backup creation. None is
|
||||
forever.
|
||||
|
||||
:returns: The created volume backup object.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
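
Example (a sketch; the volume ID and backup name are illustrative
assumptions)::

    backup = cloud.create_volume_backup(
        volume_id='00000000-0000-0000-0000-000000000000',
        name='nightly-backup', wait=True)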
|
||||
"""
|
||||
payload = {
|
||||
'name': name,
|
||||
'volume_id': volume_id,
|
||||
'description': description,
|
||||
'force': force,
|
||||
}
|
||||
|
||||
data = self._volume_client.post(
|
||||
'/backups', json=dict(backup=payload),
|
||||
error_message="Error creating backup of volume "
|
||||
"{volume_id}".format(volume_id=volume_id))
|
||||
backup = self._get_and_munchify('backup', data)
|
||||
|
||||
if wait:
|
||||
backup_id = backup['id']
|
||||
msg = ("Timeout waiting for the volume backup {} to be "
|
||||
"available".format(backup_id))
|
||||
for _ in utils.iterate_timeout(timeout, msg):
|
||||
backup = self.get_volume_backup(backup_id)
|
||||
|
||||
if backup['status'] == 'available':
|
||||
break
|
||||
|
||||
if backup['status'] == 'error':
|
||||
raise exc.OpenStackCloudException(
|
||||
"Error in creating volume backup {id}".format(
|
||||
id=backup_id))
|
||||
|
||||
return backup
|
||||
|
||||
def get_volume_backup(self, name_or_id, filters=None):
|
||||
"""Get a volume backup by name or ID.
|
||||
|
||||
:returns: A backup ``munch.Munch`` or None if no matching backup is
|
||||
found.
|
||||
"""
|
||||
return _utils._get_entity(self, 'volume_backup', name_or_id,
|
||||
filters)
|
||||
|
||||
def list_volume_snapshots(self, detailed=True, search_opts=None):
|
||||
"""List all volume snapshots.
|
||||
|
||||
:returns: A list of volume snapshots ``munch.Munch``.
|
||||
|
||||
"""
|
||||
endpoint = '/snapshots/detail' if detailed else '/snapshots'
|
||||
data = self._volume_client.get(
|
||||
endpoint,
|
||||
params=search_opts,
|
||||
error_message="Error getting a list of snapshots")
|
||||
return self._get_and_munchify('snapshots', data)
|
||||
|
||||
def list_volume_backups(self, detailed=True, search_opts=None):
|
||||
"""
|
||||
List all volume backups.
|
||||
|
||||
:param bool detailed: Also list details for each entry
|
||||
:param dict search_opts: Search options
|
||||
A dictionary of meta data to use for further filtering. Example::
|
||||
|
||||
{
|
||||
'name': 'my-volume-backup',
|
||||
'status': 'available',
|
||||
'volume_id': 'e126044c-7b4c-43be-a32a-c9cbbc9ddb56',
|
||||
'all_tenants': 1
|
||||
}
|
||||
|
||||
:returns: A list of volume backups ``munch.Munch``.
|
||||
"""
|
||||
endpoint = '/backups/detail' if detailed else '/backups'
|
||||
data = self._volume_client.get(
|
||||
endpoint, params=search_opts,
|
||||
error_message="Error getting a list of backups")
|
||||
return self._get_and_munchify('backups', data)
|
||||
|
||||
def delete_volume_backup(self, name_or_id=None, force=False, wait=False,
|
||||
timeout=None):
|
||||
"""Delete a volume backup.
|
||||
|
||||
:param name_or_id: Name or unique ID of the volume backup.
|
||||
:param force: Allow delete in state other than error or available.
|
||||
:param wait: If true, waits for volume backup to be deleted.
|
||||
:param timeout: Seconds to wait for volume backup deletion. None is
|
||||
forever.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
volume_backup = self.get_volume_backup(name_or_id)
|
||||
|
||||
if not volume_backup:
|
||||
return False
|
||||
|
||||
msg = "Error in deleting volume backup"
|
||||
if force:
|
||||
self._volume_client.post(
|
||||
'/backups/{backup_id}/action'.format(
|
||||
backup_id=volume_backup['id']),
|
||||
json={'os-force_delete': None},
|
||||
error_message=msg)
|
||||
else:
|
||||
self._volume_client.delete(
|
||||
'/backups/{backup_id}'.format(
|
||||
backup_id=volume_backup['id']),
|
||||
error_message=msg)
|
||||
if wait:
|
||||
msg = "Timeout waiting for the volume backup to be deleted."
|
||||
for count in utils.iterate_timeout(timeout, msg):
|
||||
if not self.get_volume_backup(volume_backup['id']):
|
||||
break
|
||||
|
||||
return True
|
||||
|
||||
def delete_volume_snapshot(self, name_or_id=None, wait=False,
|
||||
timeout=None):
|
||||
"""Delete a volume snapshot.
|
||||
|
||||
:param name_or_id: Name or unique ID of the volume snapshot.
|
||||
:param wait: If true, waits for volume snapshot to be deleted.
|
||||
:param timeout: Seconds to wait for volume snapshot deletion. None is
|
||||
forever.
|
||||
|
||||
:raises: OpenStackCloudTimeout if wait time exceeded.
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
volumesnapshot = self.get_volume_snapshot(name_or_id)
|
||||
|
||||
if not volumesnapshot:
|
||||
return False
|
||||
|
||||
self._volume_client.delete(
|
||||
'/snapshots/{snapshot_id}'.format(
|
||||
snapshot_id=volumesnapshot['id']),
|
||||
error_message="Error in deleting volume snapshot")
|
||||
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the volume snapshot to be deleted."):
|
||||
if not self.get_volume_snapshot(volumesnapshot['id']):
|
||||
break
|
||||
|
||||
return True
|
||||
|
||||
def search_volumes(self, name_or_id=None, filters=None):
|
||||
volumes = self.list_volumes()
|
||||
return _utils._filter_list(
|
||||
volumes, name_or_id, filters)
|
||||
|
||||
def search_volume_snapshots(self, name_or_id=None, filters=None):
|
||||
volumesnapshots = self.list_volume_snapshots()
|
||||
return _utils._filter_list(
|
||||
volumesnapshots, name_or_id, filters)
|
||||
|
||||
def search_volume_backups(self, name_or_id=None, filters=None):
|
||||
volume_backups = self.list_volume_backups()
|
||||
return _utils._filter_list(
|
||||
volume_backups, name_or_id, filters)
|
||||
|
||||
def search_volume_types(
|
||||
self, name_or_id=None, filters=None, get_extra=True):
|
||||
volume_types = self.list_volume_types(get_extra=get_extra)
|
||||
return _utils._filter_list(volume_types, name_or_id, filters)
|
||||
|
||||
def get_volume_type_access(self, name_or_id):
|
||||
"""Return a list of volume_type_access.
|
||||
|
||||
:param name_or_id: Name or ID of the volume type.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
volume_type = self.get_volume_type(name_or_id)
|
||||
if not volume_type:
|
||||
raise exc.OpenStackCloudException(
|
||||
"VolumeType not found: %s" % name_or_id)
|
||||
|
||||
data = self._volume_client.get(
|
||||
'/types/{id}/os-volume-type-access'.format(id=volume_type.id),
|
||||
error_message="Unable to get volume type access"
|
||||
" {name}".format(name=name_or_id))
|
||||
return self._normalize_volume_type_accesses(
|
||||
self._get_and_munchify('volume_type_access', data))
|
||||
|
||||
def add_volume_type_access(self, name_or_id, project_id):
|
||||
"""Grant access on a volume_type to a project.
|
||||
|
||||
:param name_or_id: ID or name of a volume_type
|
||||
:param project_id: A project id
|
||||
|
||||
NOTE: the call works even if the project does not exist.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
volume_type = self.get_volume_type(name_or_id)
|
||||
if not volume_type:
|
||||
raise exc.OpenStackCloudException(
|
||||
"VolumeType not found: %s" % name_or_id)
|
||||
with _utils.shade_exceptions():
|
||||
payload = {'project': project_id}
|
||||
self._volume_client.post(
|
||||
'/types/{id}/action'.format(id=volume_type.id),
|
||||
json=dict(addProjectAccess=payload),
|
||||
error_message="Unable to authorize {project} "
|
||||
"to use volume type {name}".format(
|
||||
name=name_or_id, project=project_id))
|
||||
|
||||
def remove_volume_type_access(self, name_or_id, project_id):
|
||||
"""Revoke access on a volume_type to a project.
|
||||
|
||||
:param name_or_id: ID or name of a volume_type
|
||||
:param project_id: A project id
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
volume_type = self.get_volume_type(name_or_id)
|
||||
if not volume_type:
|
||||
raise exc.OpenStackCloudException(
|
||||
"VolumeType not found: %s" % name_or_id)
|
||||
with _utils.shade_exceptions():
|
||||
payload = {'project': project_id}
|
||||
self._volume_client.post(
|
||||
'/types/{id}/action'.format(id=volume_type.id),
|
||||
json=dict(removeProjectAccess=payload),
|
||||
error_message="Unable to revoke {project} "
|
||||
"to use volume type {name}".format(
|
||||
name=name_or_id, project=project_id))
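# Illustrative usage sketch for the volume type access calls above. It assumes
# a clouds.yaml entry named 'mycloud', an existing private volume type named
# 'fast-ssd' and a project ID; all values are placeholders.
import openstack

conn = openstack.connect(cloud='mycloud')
project_id = 'e2b1b9a1c1d34f6c8f1f2a3b4c5d6e7f'  # placeholder project ID

# Grant the project access, list who currently has access, then revoke it.
conn.add_volume_type_access('fast-ssd', project_id)
for access in conn.get_volume_type_access('fast-ssd'):
    print(access['project_id'])
conn.remove_volume_type_access('fast-ssd', project_id)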
@@ -0,0 +1,567 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# import types so that we can reference ListType in sphinx param declarations.
|
||||
# We can't just use list, because sphinx gets confused by
|
||||
# openstack.resource.Resource.list and openstack.resource2.Resource.list
|
||||
import types # noqa
|
||||
|
||||
from openstack.cloud import exc
|
||||
from openstack.cloud import _normalize
|
||||
from openstack.cloud import _utils
|
||||
from openstack import utils
|
||||
|
||||
|
||||
class ClusteringCloudMixin(_normalize.Normalizer):
|
||||
|
||||
@property
|
||||
def _clustering_client(self):
|
||||
if 'clustering' not in self._raw_clients:
|
||||
clustering_client = self._get_versioned_client(
|
||||
'clustering', min_version=1, max_version='1.latest')
|
||||
self._raw_clients['clustering'] = clustering_client
|
||||
return self._raw_clients['clustering']
|
||||
|
||||
def create_cluster(self, name, profile, config=None, desired_capacity=0,
|
||||
max_size=None, metadata=None, min_size=None,
|
||||
timeout=None):
|
||||
profile = self.get_cluster_profile(profile)
|
||||
profile_id = profile['id']
|
||||
body = {
|
||||
'desired_capacity': desired_capacity,
|
||||
'name': name,
|
||||
'profile_id': profile_id
|
||||
}
|
||||
|
||||
if config is not None:
|
||||
body['config'] = config
|
||||
|
||||
if max_size is not None:
|
||||
body['max_size'] = max_size
|
||||
|
||||
if metadata is not None:
|
||||
body['metadata'] = metadata
|
||||
|
||||
if min_size is not None:
|
||||
body['min_size'] = min_size
|
||||
|
||||
if timeout is not None:
|
||||
body['timeout'] = timeout
|
||||
|
||||
data = self._clustering_client.post(
|
||||
'/clusters', json={'cluster': body},
|
||||
error_message="Error creating cluster {name}".format(name=name))
|
||||
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
def set_cluster_metadata(self, name_or_id, metadata):
|
||||
cluster = self.get_cluster(name_or_id)
|
||||
if not cluster:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid Cluster {cluster}'.format(cluster=name_or_id))
|
||||
|
||||
self._clustering_client.post(
|
||||
'/clusters/{cluster_id}/metadata'.format(cluster_id=cluster['id']),
|
||||
json={'metadata': metadata},
|
||||
error_message='Error updating cluster metadata')
|
||||
|
||||
def get_cluster_by_id(self, cluster_id):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
"/clusters/{cluster_id}".format(cluster_id=cluster_id),
|
||||
error_message="Error fetching cluster {name}".format(
|
||||
name=cluster_id))
|
||||
return self._get_and_munchify('cluster', data)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def get_cluster(self, name_or_id, filters=None):
|
||||
return _utils._get_entity(
|
||||
cloud=self, resource='cluster',
|
||||
name_or_id=name_or_id, filters=filters)
|
||||
|
||||
def update_cluster(self, name_or_id, new_name=None,
|
||||
profile_name_or_id=None, config=None, metadata=None,
|
||||
timeout=None, profile_only=False):
|
||||
old_cluster = self.get_cluster(name_or_id)
|
||||
if old_cluster is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid Cluster {cluster}'.format(cluster=name_or_id))
|
||||
cluster = {
|
||||
'profile_only': profile_only
|
||||
}
|
||||
|
||||
if config is not None:
|
||||
cluster['config'] = config
|
||||
|
||||
if metadata is not None:
|
||||
cluster['metadata'] = metadata
|
||||
|
||||
if profile_name_or_id is not None:
|
||||
profile = self.get_cluster_profile(profile_name_or_id)
|
||||
if profile is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid Cluster Profile {profile}'.format(
|
||||
profile=profile_name_or_id))
|
||||
cluster['profile_id'] = profile.id
|
||||
|
||||
if timeout is not None:
|
||||
cluster['timeout'] = timeout
|
||||
|
||||
if new_name is not None:
|
||||
cluster['name'] = new_name
|
||||
|
||||
data = self._clustering_client.patch(
|
||||
"/clusters/{cluster_id}".format(cluster_id=old_cluster['id']),
|
||||
json={'cluster': cluster},
|
||||
error_message="Error updating cluster "
|
||||
"{name}".format(name=name_or_id))
|
||||
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
def delete_cluster(self, name_or_id):
|
||||
cluster = self.get_cluster(name_or_id)
|
||||
if cluster is None:
|
||||
self.log.debug("Cluster %s not found for deleting", name_or_id)
|
||||
return False
|
||||
|
||||
for policy in self.list_policies_on_cluster(name_or_id):
|
||||
detach_policy = self.get_cluster_policy_by_id(
|
||||
policy['policy_id'])
|
||||
self.detach_policy_from_cluster(cluster, detach_policy)
|
||||
|
||||
for receiver in self.list_cluster_receivers():
|
||||
if cluster["id"] == receiver["cluster_id"]:
|
||||
self.delete_cluster_receiver(receiver["id"], wait=True)
|
||||
|
||||
self._clustering_client.delete(
|
||||
"/clusters/{cluster_id}".format(cluster_id=name_or_id),
|
||||
error_message="Error deleting cluster {name}".format(
|
||||
name=name_or_id))
|
||||
|
||||
return True
|
||||
|
||||
def search_clusters(self, name_or_id=None, filters=None):
|
||||
clusters = self.list_clusters()
|
||||
return _utils._filter_list(clusters, name_or_id, filters)
|
||||
|
||||
def list_clusters(self):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
'/clusters',
|
||||
error_message="Error fetching clusters")
|
||||
return self._get_and_munchify('clusters', data)
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return []
|
||||
|
||||
def attach_policy_to_cluster(self, name_or_id, policy_name_or_id,
|
||||
is_enabled):
|
||||
cluster = self.get_cluster(name_or_id)
|
||||
policy = self.get_cluster_policy(policy_name_or_id)
|
||||
if cluster is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Cluster {cluster} not found for attaching'.format(
|
||||
cluster=name_or_id))
|
||||
|
||||
if policy is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Policy {policy} not found for attaching'.format(
|
||||
policy=policy_name_or_id))
|
||||
|
||||
body = {
|
||||
'policy_id': policy['id'],
|
||||
'enabled': is_enabled
|
||||
}
|
||||
|
||||
self._clustering_client.post(
|
||||
"/clusters/{cluster_id}/actions".format(cluster_id=cluster['id']),
|
||||
error_message="Error attaching policy {policy} to cluster "
|
||||
"{cluster}".format(
|
||||
policy=policy['id'],
|
||||
cluster=cluster['id']),
|
||||
json={'policy_attach': body})
|
||||
|
||||
return True
|
||||
|
||||
def detach_policy_from_cluster(
|
||||
self, name_or_id, policy_name_or_id, wait=False, timeout=3600):
|
||||
cluster = self.get_cluster(name_or_id)
|
||||
policy = self.get_cluster_policy(policy_name_or_id)
|
||||
if cluster is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Cluster {cluster} not found for detaching'.format(
|
||||
cluster=name_or_id))
|
||||
|
||||
if policy is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Policy {policy} not found for detaching'.format(
|
||||
policy=policy_name_or_id))
|
||||
|
||||
body = {'policy_id': policy['id']}
|
||||
self._clustering_client.post(
|
||||
"/clusters/{cluster_id}/actions".format(cluster_id=cluster['id']),
|
||||
error_message="Error detaching policy {policy} from cluster "
|
||||
"{cluster}".format(
|
||||
policy=policy['id'],
|
||||
cluster=cluster['id']),
|
||||
json={'policy_detach': body})
|
||||
|
||||
if not wait:
|
||||
return True
|
||||
|
||||
value = []
|
||||
|
||||
for count in utils.iterate_timeout(
|
||||
timeout, "Timeout waiting for cluster policy to detach"):
|
||||
|
||||
# TODO(bjjohnson) This logic will wait until there are no policies.
|
||||
# Since we're detaching a specific policy, checking to make sure
|
||||
# that policy is not in the list of policies would be better.
|
||||
policy_status = self.get_cluster_by_id(cluster['id'])['policies']
|
||||
|
||||
if policy_status == value:
|
||||
break
|
||||
return True
|
||||
|
||||
def update_policy_on_cluster(self, name_or_id, policy_name_or_id,
|
||||
is_enabled):
|
||||
cluster = self.get_cluster(name_or_id)
|
||||
policy = self.get_cluster_policy(policy_name_or_id)
|
||||
if cluster is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Cluster {cluster} not found for updating'.format(
|
||||
cluster=name_or_id))
|
||||
|
||||
if policy is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Policy {policy} not found for updating'.format(
|
||||
policy=policy_name_or_id))
|
||||
|
||||
body = {
|
||||
'policy_id': policy['id'],
|
||||
'enabled': is_enabled
|
||||
}
|
||||
self._clustering_client.post(
|
||||
"/clusters/{cluster_id}/actions".format(cluster_id=cluster['id']),
|
||||
error_message="Error updating policy {policy} on cluster "
|
||||
"{cluster}".format(
|
||||
policy=policy['id'],
|
||||
cluster=cluster['id']),
|
||||
json={'policy_update': body})
|
||||
|
||||
return True
|
||||
|
||||
def get_policy_on_cluster(self, name_or_id, policy_name_or_id):
|
||||
try:
|
||||
policy = self._clustering_client.get(
|
||||
"/clusters/{cluster_id}/policies/{policy_id}".format(
|
||||
cluster_id=name_or_id, policy_id=policy_name_or_id),
|
||||
error_message="Error fetching policy "
|
||||
"{name}".format(name=policy_name_or_id))
|
||||
return self._get_and_munchify('cluster_policy', policy)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def list_policies_on_cluster(self, name_or_id):
|
||||
endpoint = "/clusters/{cluster_id}/policies".format(
|
||||
cluster_id=name_or_id)
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
endpoint,
|
||||
error_message="Error fetching cluster policies")
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return []
|
||||
return self._get_and_munchify('cluster_policies', data)
|
||||
|
||||
def create_cluster_profile(self, name, spec, metadata=None):
|
||||
profile = {
|
||||
'name': name,
|
||||
'spec': spec
|
||||
}
|
||||
|
||||
if metadata is not None:
|
||||
profile['metadata'] = metadata
|
||||
|
||||
data = self._clustering_client.post(
|
||||
'/profiles', json={'profile': profile},
|
||||
error_message="Error creating profile {name}".format(name=name))
|
||||
|
||||
return self._get_and_munchify('profile', data)
|
||||
|
||||
def set_cluster_profile_metadata(self, name_or_id, metadata):
|
||||
profile = self.get_cluster_profile(name_or_id)
|
||||
if not profile:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid Profile {profile}'.format(profile=name_or_id))
|
||||
|
||||
self._clustering_client.post(
|
||||
'/profiles/{profile_id}/metadata'.format(profile_id=profile['id']),
|
||||
json={'metadata': metadata},
|
||||
error_message='Error updating profile metadata')
|
||||
|
||||
def search_cluster_profiles(self, name_or_id=None, filters=None):
|
||||
cluster_profiles = self.list_cluster_profiles()
|
||||
return _utils._filter_list(cluster_profiles, name_or_id, filters)
|
||||
|
||||
def list_cluster_profiles(self):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
'/profiles',
|
||||
error_message="Error fetching profiles")
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return []
|
||||
return self._get_and_munchify('profiles', data)
|
||||
|
||||
def get_cluster_profile_by_id(self, profile_id):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
"/profiles/{profile_id}".format(profile_id=profile_id),
|
||||
error_message="Error fetching profile {name}".format(
|
||||
name=profile_id))
|
||||
return self._get_and_munchify('profile', data)
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return None
|
||||
|
||||
def get_cluster_profile(self, name_or_id, filters=None):
|
||||
return _utils._get_entity(self, 'cluster_profile', name_or_id, filters)
|
||||
|
||||
def delete_cluster_profile(self, name_or_id):
|
||||
profile = self.get_cluster_profile(name_or_id)
|
||||
if profile is None:
|
||||
self.log.debug("Profile %s not found for deleting", name_or_id)
|
||||
return False
|
||||
|
||||
for cluster in self.list_clusters():
|
||||
if (name_or_id, profile.id) in cluster.items():
|
||||
self.log.debug(
|
||||
"Profile %s is being used by cluster %s, won't delete",
|
||||
name_or_id, cluster.name)
|
||||
return False
|
||||
|
||||
self._clustering_client.delete(
|
||||
"/profiles/{profile_id}".format(profile_id=profile['id']),
|
||||
error_message="Error deleting profile "
|
||||
"{name}".format(name=name_or_id))
|
||||
|
||||
return True
|
||||
|
||||
def update_cluster_profile(self, name_or_id, metadata=None, new_name=None):
|
||||
old_profile = self.get_cluster_profile(name_or_id)
|
||||
if not old_profile:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid Profile {profile}'.format(profile=name_or_id))
|
||||
|
||||
profile = {}
|
||||
|
||||
if metadata is not None:
|
||||
profile['metadata'] = metadata
|
||||
|
||||
if new_name is not None:
|
||||
profile['name'] = new_name
|
||||
|
||||
data = self._clustering_client.patch(
|
||||
"/profiles/{profile_id}".format(profile_id=old_profile.id),
|
||||
json={'profile': profile},
|
||||
error_message="Error updating profile {name}".format(
|
||||
name=name_or_id))
|
||||
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
def create_cluster_policy(self, name, spec):
|
||||
policy = {
|
||||
'name': name,
|
||||
'spec': spec
|
||||
}
|
||||
|
||||
data = self._clustering_client.post(
|
||||
'/policies', json={'policy': policy},
|
||||
error_message="Error creating policy {name}".format(
|
||||
name=policy['name']))
|
||||
return self._get_and_munchify('policy', data)
|
||||
|
||||
def search_cluster_policies(self, name_or_id=None, filters=None):
|
||||
cluster_policies = self.list_cluster_policies()
|
||||
return _utils._filter_list(cluster_policies, name_or_id, filters)
|
||||
|
||||
def list_cluster_policies(self):
|
||||
endpoint = "/policies"
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
endpoint,
|
||||
error_message="Error fetching cluster policies")
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return []
|
||||
return self._get_and_munchify('policies', data)
|
||||
|
||||
def get_cluster_policy_by_id(self, policy_id):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
"/policies/{policy_id}".format(policy_id=policy_id),
|
||||
error_message="Error fetching policy {name}".format(
|
||||
name=policy_id))
|
||||
return self._get_and_munchify('policy', data)
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return None
|
||||
|
||||
def get_cluster_policy(self, name_or_id, filters=None):
|
||||
return _utils._get_entity(
|
||||
self, 'cluster_policie', name_or_id, filters)
|
||||
|
||||
def delete_cluster_policy(self, name_or_id):
|
||||
policy = self.get_cluster_policy_by_id(name_or_id)
|
||||
if policy is None:
|
||||
self.log.debug("Policy %s not found for deleting", name_or_id)
|
||||
return False
|
||||
|
||||
for cluster in self.list_clusters():
|
||||
if (name_or_id, policy.id) in cluster.items():
|
||||
self.log.debug(
|
||||
"Policy %s is being used by cluster %s, won't delete",
|
||||
name_or_id, cluster.name)
|
||||
return False
|
||||
|
||||
self._clustering_client.delete(
|
||||
"/policies/{policy_id}".format(policy_id=name_or_id),
|
||||
error_message="Error deleting policy "
|
||||
"{name}".format(name=name_or_id))
|
||||
|
||||
return True
|
||||
|
||||
def update_cluster_policy(self, name_or_id, new_name):
|
||||
old_policy = self.get_cluster_policy(name_or_id)
|
||||
if not old_policy:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid Policy {policy}'.format(policy=name_or_id))
|
||||
policy = {'name': new_name}
|
||||
|
||||
data = self._clustering_client.patch(
|
||||
"/policies/{policy_id}".format(policy_id=old_policy.id),
|
||||
json={'policy': policy},
|
||||
error_message="Error updating policy "
|
||||
"{name}".format(name=name_or_id))
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
def create_cluster_receiver(self, name, receiver_type,
|
||||
cluster_name_or_id=None, action=None,
|
||||
actor=None, params=None):
|
||||
cluster = self.get_cluster(cluster_name_or_id)
|
||||
if cluster is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid cluster {cluster}'.format(cluster=cluster_name_or_id))
|
||||
|
||||
receiver = {
|
||||
'name': name,
|
||||
'type': receiver_type
|
||||
}
|
||||
|
||||
if cluster_name_or_id is not None:
|
||||
receiver['cluster_id'] = cluster.id
|
||||
|
||||
if action is not None:
|
||||
receiver['action'] = action
|
||||
|
||||
if actor is not None:
|
||||
receiver['actor'] = actor
|
||||
|
||||
if params is not None:
|
||||
receiver['params'] = params
|
||||
|
||||
data = self._clustering_client.post(
|
||||
'/receivers', json={'receiver': receiver},
|
||||
error_message="Error creating receiver {name}".format(name=name))
|
||||
return self._get_and_munchify('receiver', data)
|
||||
|
||||
def search_cluster_receivers(self, name_or_id=None, filters=None):
|
||||
cluster_receivers = self.list_cluster_receivers()
|
||||
return _utils._filter_list(cluster_receivers, name_or_id, filters)
|
||||
|
||||
def list_cluster_receivers(self):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
'/receivers',
|
||||
error_message="Error fetching receivers")
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return []
|
||||
return self._get_and_munchify('receivers', data)
|
||||
|
||||
def get_cluster_receiver_by_id(self, receiver_id):
|
||||
try:
|
||||
data = self._clustering_client.get(
|
||||
"/receivers/{receiver_id}".format(receiver_id=receiver_id),
|
||||
error_message="Error fetching receiver {name}".format(
|
||||
name=receiver_id))
|
||||
return self._get_and_munchify('receiver', data)
|
||||
except exc.OpenStackCloudURINotFound as e:
|
||||
self.log.debug(str(e), exc_info=True)
|
||||
return None
|
||||
|
||||
def get_cluster_receiver(self, name_or_id, filters=None):
|
||||
return _utils._get_entity(
|
||||
self, 'cluster_receiver', name_or_id, filters)
|
||||
|
||||
def delete_cluster_receiver(self, name_or_id, wait=False, timeout=3600):
|
||||
receiver = self.get_cluster_receiver(name_or_id)
|
||||
if receiver is None:
|
||||
self.log.debug("Receiver %s not found for deleting", name_or_id)
|
||||
return False
|
||||
|
||||
receiver_id = receiver['id']
|
||||
|
||||
self._clustering_client.delete(
|
||||
"/receivers/{receiver_id}".format(receiver_id=receiver_id),
|
||||
error_message="Error deleting receiver {name}".format(
|
||||
name=name_or_id))
|
||||
|
||||
if not wait:
|
||||
return True
|
||||
|
||||
for count in utils.iterate_timeout(
|
||||
timeout, "Timeout waiting for cluster receiver to delete"):
|
||||
|
||||
receiver = self.get_cluster_receiver_by_id(receiver_id)
|
||||
|
||||
if not receiver:
|
||||
break
|
||||
|
||||
return True
|
||||
|
||||
def update_cluster_receiver(self, name_or_id, new_name=None, action=None,
|
||||
params=None):
|
||||
old_receiver = self.get_cluster_receiver(name_or_id)
|
||||
if old_receiver is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Invalid receiver {receiver}'.format(receiver=name_or_id))
|
||||
|
||||
receiver = {}
|
||||
|
||||
if new_name is not None:
|
||||
receiver['name'] = new_name
|
||||
|
||||
if action is not None:
|
||||
receiver['action'] = action
|
||||
|
||||
if params is not None:
|
||||
receiver['params'] = params
|
||||
|
||||
data = self._clustering_client.patch(
|
||||
"/receivers/{receiver_id}".format(receiver_id=old_receiver.id),
|
||||
json={'receiver': receiver},
|
||||
error_message="Error updating receiver {name}".format(
|
||||
name=name_or_id))
|
||||
return self._get_and_munchify(key=None, data=data)
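# Illustrative usage sketch for the clustering (Senlin) calls above. It
# assumes a clouds.yaml entry named 'mycloud' with the clustering service
# deployed; the profile and policy specs, flavor, image and network names are
# placeholders.
import openstack

conn = openstack.connect(cloud='mycloud')

profile = conn.create_cluster_profile(
    name='web-profile',
    spec={'type': 'os.nova.server', 'version': '1.0',
          'properties': {'flavor': 'm1.small', 'image': 'cirros',
                         'networks': [{'network': 'private'}]}})

conn.create_cluster(
    name='web-cluster', profile=profile['id'],
    desired_capacity=2, min_size=1, max_size=4)

policy = conn.create_cluster_policy(
    name='scale-out',
    spec={'type': 'senlin.policy.scaling', 'version': '1.0',
          'properties': {'event': 'CLUSTER_SCALE_OUT',
                         'adjustment': {'type': 'CHANGE_IN_CAPACITY',
                                        'number': 1}}})

# Policies are attached by name or ID; detach_policy_from_cluster() can later
# wait until the cluster reports no attached policies.
conn.attach_policy_to_cluster('web-cluster', policy['id'], is_enabled=True)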
@@ -0,0 +1,425 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# import types so that we can reference ListType in sphinx param declarations.
|
||||
# We can't just use list, because sphinx gets confused by
|
||||
# openstack.resource.Resource.list and openstack.resource2.Resource.list
|
||||
import types # noqa
|
||||
|
||||
from openstack.cloud import exc
|
||||
from openstack.cloud import _normalize
|
||||
from openstack.cloud import _utils
|
||||
|
||||
|
||||
class CoeCloudMixin(_normalize.Normalizer):
|
||||
|
||||
@property
|
||||
def _container_infra_client(self):
|
||||
if 'container-infra' not in self._raw_clients:
|
||||
self._raw_clients['container-infra'] = self._get_raw_client(
|
||||
'container-infra')
|
||||
return self._raw_clients['container-infra']
|
||||
|
||||
@_utils.cache_on_arguments()
|
||||
def list_coe_clusters(self):
|
||||
"""List COE(Ccontainer Orchestration Engine) cluster.
|
||||
|
||||
:returns: a list of dicts containing the cluster.
|
||||
|
||||
:raises: ``OpenStackCloudException``: if something goes wrong during
|
||||
the OpenStack API call.
|
||||
"""
|
||||
with _utils.shade_exceptions("Error fetching cluster list"):
|
||||
data = self._container_infra_client.get('/clusters')
|
||||
return self._normalize_coe_clusters(
|
||||
self._get_and_munchify('clusters', data))
|
||||
|
||||
def search_coe_clusters(
|
||||
self, name_or_id=None, filters=None):
|
||||
"""Search COE cluster.
|
||||
|
||||
:param name_or_id: cluster name or ID.
|
||||
:param filters: a dict containing additional filters to use.
|
||||

:returns: a list of dicts containing the clusters
|
||||
|
||||
:raises: ``OpenStackCloudException``: if something goes wrong during
|
||||
the OpenStack API call.
|
||||
"""
|
||||
coe_clusters = self.list_coe_clusters()
|
||||
return _utils._filter_list(
|
||||
coe_clusters, name_or_id, filters)
|
||||
|
||||
def get_coe_cluster(self, name_or_id, filters=None):
|
||||
"""Get a COE cluster by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the cluster.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering. Elements
|
||||
of this dictionary may, themselves, be dictionaries. Example::
|
||||
|
||||
{
|
||||
'last_name': 'Smith',
|
||||
'other': {
|
||||
'gender': 'Female'
|
||||
}
|
||||
}
|
||||
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: A cluster dict or None if no matching cluster is found.
|
||||
"""
|
||||
return _utils._get_entity(self, 'coe_cluster', name_or_id,
|
||||
filters=filters)
|
||||
|
||||
def create_coe_cluster(
|
||||
self, name, cluster_template_id, **kwargs):
|
||||
"""Create a COE cluster based on given cluster template.
|
||||
|
||||
:param string name: Name of the cluster.
|
||||
:param string cluster_template_id: ID of the cluster template to use.
|
||||
|
||||
Other arguments will be passed in kwargs.
|
||||
|
||||
:returns: a dict containing the cluster description
|
||||
|
||||
:raises: ``OpenStackCloudException`` if something goes wrong during
|
||||
the OpenStack API call
|
||||
"""
|
||||
error_message = ("Error creating cluster of name"
|
||||
" {cluster_name}".format(cluster_name=name))
|
||||
with _utils.shade_exceptions(error_message):
|
||||
body = kwargs.copy()
|
||||
body['name'] = name
|
||||
body['cluster_template_id'] = cluster_template_id
|
||||
|
||||
cluster = self._container_infra_client.post(
|
||||
'/clusters', json=body)
|
||||
|
||||
self.list_coe_clusters.invalidate(self)
|
||||
return cluster
|
||||
|
||||
def delete_coe_cluster(self, name_or_id):
|
||||
"""Delete a COE cluster.
|
||||
|
||||
:param name_or_id: Name or unique ID of the cluster.
|
||||
:returns: True if the delete succeeded, False if the
|
||||
cluster was not found.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
cluster = self.get_coe_cluster(name_or_id)
|
||||
|
||||
if not cluster:
|
||||
self.log.debug(
|
||||
"COE Cluster %(name_or_id)s does not exist",
|
||||
{'name_or_id': name_or_id},
|
||||
exc_info=True)
|
||||
return False
|
||||
|
||||
with _utils.shade_exceptions("Error in deleting COE cluster"):
|
||||
self._container_infra_client.delete(
|
||||
'/clusters/{id}'.format(id=cluster['id']))
|
||||
self.list_coe_clusters.invalidate(self)
|
||||
|
||||
return True
|
||||
|
||||
@_utils.valid_kwargs('node_count')
|
||||
def update_coe_cluster(self, name_or_id, operation, **kwargs):
|
||||
"""Update a COE cluster.
|
||||
|
||||
:param name_or_id: Name or ID of the COE cluster being updated.
|
||||
:param operation: Operation to perform - add, remove, replace.
|
||||
|
||||
Other arguments will be passed with kwargs.
|
||||
|
||||
:returns: a dict representing the updated cluster.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
self.list_coe_clusters.invalidate(self)
|
||||
cluster = self.get_coe_cluster(name_or_id)
|
||||
if not cluster:
|
||||
raise exc.OpenStackCloudException(
|
||||
"COE cluster %s not found." % name_or_id)
|
||||
|
||||
if operation not in ['add', 'replace', 'remove']:
|
||||
raise TypeError(
|
||||
"%s operation not in 'add', 'replace', 'remove'" % operation)
|
||||
|
||||
patches = _utils.generate_patches_from_kwargs(operation, **kwargs)
|
||||
# No need to fire an API call if there is an empty patch
|
||||
if not patches:
|
||||
return cluster
|
||||
|
||||
with _utils.shade_exceptions(
|
||||
"Error updating COE cluster {0}".format(name_or_id)):
|
||||
self._container_infra_client.patch(
|
||||
'/clusters/{id}'.format(id=cluster['id']),
|
||||
json=patches)
|
||||
|
||||
new_cluster = self.get_coe_cluster(name_or_id)
|
||||
return new_cluster
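# Illustrative usage sketch: update_coe_cluster() turns the operation
# ('add', 'replace' or 'remove') plus the given keyword arguments into a JSON
# patch, so scaling a cluster is a single call. Assumes a clouds.yaml entry
# named 'mycloud' and an existing cluster named 'k8s-demo' (placeholders).
import openstack

conn = openstack.connect(cloud='mycloud')

# Replace the node_count attribute, i.e. scale the cluster to five nodes.
cluster = conn.update_coe_cluster('k8s-demo', 'replace', node_count=5)
print(cluster['id'])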
|
||||
|
||||
def get_coe_cluster_certificate(self, cluster_id):
|
||||
"""Get details about the CA certificate for a cluster by name or ID.
|
||||
|
||||
:param cluster_id: ID of the cluster.
|
||||
|
||||
:returns: Details about the CA certificate for the given cluster.
|
||||
"""
|
||||
msg = ("Error fetching CA cert for the cluster {cluster_id}".format(
|
||||
cluster_id=cluster_id))
|
||||
url = "/certificates/{cluster_id}".format(cluster_id=cluster_id)
|
||||
data = self._container_infra_client.get(url,
|
||||
error_message=msg)
|
||||
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
def sign_coe_cluster_certificate(self, cluster_id, csr):
|
||||
"""Sign client key and generate the CA certificate for a cluster
|
||||
|
||||
:param cluster_id: UUID of the cluster.
|
||||
:param csr: Certificate Signing Request (CSR) for authenticating
|
||||
client key.The CSR will be used by Magnum to generate
|
||||
a signed certificate that client will use to communicate
|
||||
with the cluster.
|
||||
|
||||
:returns: a dict representing the signed certs.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
error_message = ("Error signing certs for cluster"
|
||||
" {cluster_id}".format(cluster_id=cluster_id))
|
||||
with _utils.shade_exceptions(error_message):
|
||||
body = {}
|
||||
body['cluster_uuid'] = cluster_id
|
||||
body['csr'] = csr
|
||||
|
||||
certs = self._container_infra_client.post(
|
||||
'/certificates', json=body)
|
||||
|
||||
return self._get_and_munchify(key=None, data=certs)
|
||||
|
||||
@_utils.cache_on_arguments()
|
||||
def list_cluster_templates(self, detail=False):
|
||||
"""List cluster templates.
|
||||
|
||||
:param bool detail: Ignored. Included for backwards compat.
|
||||
ClusterTemplates are always returned with full details.
|
||||
|
||||
:returns: a list of dicts containing the cluster template details.
|
||||
|
||||
:raises: ``OpenStackCloudException``: if something goes wrong during
|
||||
the OpenStack API call.
|
||||
"""
|
||||
with _utils.shade_exceptions("Error fetching cluster template list"):
|
||||
try:
|
||||
data = self._container_infra_client.get('/clustertemplates')
|
||||
# NOTE(flwang): Magnum adds /clustertemplates and /cluster
|
||||
# to deprecate /baymodels and /bay since Newton release. So
|
||||
# we're using a small tag to indicate if current
|
||||
# cloud has those two new API endpoints.
|
||||
self._container_infra_client._has_magnum_after_newton = True
|
||||
return self._normalize_cluster_templates(
|
||||
self._get_and_munchify('clustertemplates', data))
|
||||
except exc.OpenStackCloudURINotFound:
|
||||
data = self._container_infra_client.get('/baymodels/detail')
|
||||
return self._normalize_cluster_templates(
|
||||
self._get_and_munchify('baymodels', data))
|
||||
list_baymodels = list_cluster_templates
|
||||
list_coe_cluster_templates = list_cluster_templates
|
||||
|
||||
def search_cluster_templates(
|
||||
self, name_or_id=None, filters=None, detail=False):
|
||||
"""Search cluster templates.
|
||||
|
||||
:param name_or_id: cluster template name or ID.
|
||||
:param filters: a dict containing additional filters to use.
|
||||
:param detail: a boolean to control if we need summarized or
|
||||
detailed output.
|
||||
|
||||
:returns: a list of dict containing the cluster templates
|
||||
|
||||
:raises: ``OpenStackCloudException``: if something goes wrong during
|
||||
the OpenStack API call.
|
||||
"""
|
||||
cluster_templates = self.list_cluster_templates(detail=detail)
|
||||
return _utils._filter_list(
|
||||
cluster_templates, name_or_id, filters)
|
||||
search_baymodels = search_cluster_templates
|
||||
search_coe_cluster_templates = search_cluster_templates
|
||||
|
||||
def get_cluster_template(self, name_or_id, filters=None, detail=False):
|
||||
"""Get a cluster template by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the cluster template.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering. Elements
|
||||
of this dictionary may, themselves, be dictionaries. Example::
|
||||
|
||||
{
|
||||
'last_name': 'Smith',
|
||||
'other': {
|
||||
'gender': 'Female'
|
||||
}
|
||||
}
|
||||
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: A cluster template dict or None if no matching
|
||||
cluster template is found.
|
||||
"""
|
||||
return _utils._get_entity(self, 'cluster_template', name_or_id,
|
||||
filters=filters, detail=detail)
|
||||
get_baymodel = get_cluster_template
|
||||
get_coe_cluster_template = get_cluster_template
|
||||
|
||||
def create_cluster_template(
|
||||
self, name, image_id=None, keypair_id=None, coe=None, **kwargs):
|
||||
"""Create a cluster template.
|
||||
|
||||
:param string name: Name of the cluster template.
|
||||
:param string image_id: Name or ID of the image to use.
|
||||
:param string keypair_id: Name or ID of the keypair to use.
|
||||
:param string coe: Name of the coe for the cluster template.
|
||||
|
||||
Other arguments will be passed in kwargs.
|
||||
|
||||
:returns: a dict containing the cluster template description
|
||||
|
||||
:raises: ``OpenStackCloudException`` if something goes wrong during
|
||||
the OpenStack API call
|
||||
"""
|
||||
error_message = ("Error creating cluster template of name"
|
||||
" {cluster_template_name}".format(
|
||||
cluster_template_name=name))
|
||||
with _utils.shade_exceptions(error_message):
|
||||
body = kwargs.copy()
|
||||
body['name'] = name
|
||||
body['image_id'] = image_id
|
||||
body['keypair_id'] = keypair_id
|
||||
body['coe'] = coe
|
||||
|
||||
try:
|
||||
cluster_template = self._container_infra_client.post(
|
||||
'/clustertemplates', json=body)
|
||||
self._container_infra_client._has_magnum_after_newton = True
|
||||
except exc.OpenStackCloudURINotFound:
|
||||
cluster_template = self._container_infra_client.post(
|
||||
'/baymodels', json=body)
|
||||
|
||||
self.list_cluster_templates.invalidate(self)
|
||||
return cluster_template
|
||||
create_baymodel = create_cluster_template
|
||||
create_coe_cluster_template = create_cluster_template
|
||||
|
||||
def delete_cluster_template(self, name_or_id):
|
||||
"""Delete a cluster template.
|
||||
|
||||
:param name_or_id: Name or unique ID of the cluster template.
|
||||
:returns: True if the delete succeeded, False if the
|
||||
cluster template was not found.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
cluster_template = self.get_cluster_template(name_or_id)
|
||||
|
||||
if not cluster_template:
|
||||
self.log.debug(
|
||||
"Cluster template %(name_or_id)s does not exist",
|
||||
{'name_or_id': name_or_id},
|
||||
exc_info=True)
|
||||
return False
|
||||
|
||||
with _utils.shade_exceptions("Error in deleting cluster template"):
|
||||
if getattr(self._container_infra_client,
|
||||
'_has_magnum_after_newton', False):
|
||||
self._container_infra_client.delete(
|
||||
'/clustertemplates/{id}'.format(id=cluster_template['id']))
|
||||
else:
|
||||
self._container_infra_client.delete(
|
||||
'/baymodels/{id}'.format(id=cluster_template['id']))
|
||||
self.list_cluster_templates.invalidate(self)
|
||||
|
||||
return True
|
||||
delete_baymodel = delete_cluster_template
|
||||
delete_coe_cluster_template = delete_cluster_template
|
||||
|
||||
@_utils.valid_kwargs('name', 'image_id', 'flavor_id', 'master_flavor_id',
|
||||
'keypair_id', 'external_network_id', 'fixed_network',
|
||||
'dns_nameserver', 'docker_volume_size', 'labels',
|
||||
'coe', 'http_proxy', 'https_proxy', 'no_proxy',
|
||||
'network_driver', 'tls_disabled', 'public',
|
||||
'registry_enabled', 'volume_driver')
|
||||
def update_cluster_template(self, name_or_id, operation, **kwargs):
|
||||
"""Update a cluster template.
|
||||
|
||||
:param name_or_id: Name or ID of the cluster template being updated.
|
||||
:param operation: Operation to perform - add, remove, replace.
|
||||
|
||||
Other arguments will be passed with kwargs.
|
||||
|
||||
:returns: a dict representing the updated cluster template.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
self.list_cluster_templates.invalidate(self)
|
||||
cluster_template = self.get_cluster_template(name_or_id)
|
||||
if not cluster_template:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Cluster template %s not found." % name_or_id)
|
||||
|
||||
if operation not in ['add', 'replace', 'remove']:
|
||||
raise TypeError(
|
||||
"%s operation not in 'add', 'replace', 'remove'" % operation)
|
||||
|
||||
patches = _utils.generate_patches_from_kwargs(operation, **kwargs)
|
||||
# No need to fire an API call if there is an empty patch
|
||||
if not patches:
|
||||
return cluster_template
|
||||
|
||||
with _utils.shade_exceptions(
|
||||
"Error updating cluster template {0}".format(name_or_id)):
|
||||
if getattr(self._container_infra_client,
|
||||
'_has_magnum_after_newton', False):
|
||||
self._container_infra_client.patch(
|
||||
'/clustertemplates/{id}'.format(id=cluster_template['id']),
|
||||
json=patches)
|
||||
else:
|
||||
self._container_infra_client.patch(
|
||||
'/baymodels/{id}'.format(id=cluster_template['id']),
|
||||
json=patches)
|
||||
|
||||
new_cluster_template = self.get_cluster_template(name_or_id)
|
||||
return new_cluster_template
|
||||
update_baymodel = update_cluster_template
|
||||
update_coe_cluster_template = update_cluster_template
|
||||
|
||||
def list_magnum_services(self):
|
||||
"""List all Magnum services.
|
||||
:returns: a list of dicts containing the service details.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
with _utils.shade_exceptions("Error fetching Magnum services list"):
|
||||
data = self._container_infra_client.get('/mservices')
|
||||
return self._normalize_magnum_services(
|
||||
self._get_and_munchify('mservices', data))
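# Illustrative usage sketch for the Magnum calls above. It assumes a
# clouds.yaml entry named 'mycloud'; the image, keypair and template names are
# placeholders that must exist in the cloud.
import openstack

conn = openstack.connect(cloud='mycloud')

template = conn.create_cluster_template(
    name='k8s-template', image_id='fedora-coreos',
    keypair_id='mykey', coe='kubernetes')

cluster = conn.create_coe_cluster(
    name='k8s-demo',
    cluster_template_id=template['uuid'],
    node_count=2)

# The CA certificate of the new cluster can then be fetched by its UUID.
ca_cert = conn.get_coe_cluster_certificate(cluster['uuid'])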
|
File diff suppressed because it is too large
Load Diff
|
@@ -0,0 +1,296 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# import types so that we can reference ListType in sphinx param declarations.
|
||||
# We can't just use list, because sphinx gets confused by
|
||||
# openstack.resource.Resource.list and openstack.resource2.Resource.list
|
||||
import types # noqa
|
||||
|
||||
from openstack.cloud import exc
|
||||
from openstack.cloud import _normalize
|
||||
from openstack.cloud import _utils
|
||||
|
||||
|
||||
class DnsCloudMixin(_normalize.Normalizer):
|
||||
|
||||
@property
|
||||
def _dns_client(self):
|
||||
if 'dns' not in self._raw_clients:
|
||||
dns_client = self._get_versioned_client(
|
||||
'dns', min_version=2, max_version='2.latest')
|
||||
self._raw_clients['dns'] = dns_client
|
||||
return self._raw_clients['dns']
|
||||
|
||||
def list_zones(self):
|
||||
"""List all available zones.
|
||||
|
||||
:returns: A list of zones dicts.
|
||||
|
||||
"""
|
||||
data = self._dns_client.get(
|
||||
"/zones",
|
||||
error_message="Error fetching zones list")
|
||||
return self._get_and_munchify('zones', data)
|
||||
|
||||
def get_zone(self, name_or_id, filters=None):
|
||||
"""Get a zone by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the zone
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: A zone dict or None if no matching zone is found.
|
||||
|
||||
"""
|
||||
return _utils._get_entity(self, 'zone', name_or_id, filters)
|
||||
|
||||
def search_zones(self, name_or_id=None, filters=None):
|
||||
zones = self.list_zones()
|
||||
return _utils._filter_list(zones, name_or_id, filters)
|
||||
|
||||
def create_zone(self, name, zone_type=None, email=None, description=None,
|
||||
ttl=None, masters=None):
|
||||
"""Create a new zone.
|
||||
|
||||
:param name: Name of the zone being created.
|
||||
:param zone_type: Type of the zone (primary/secondary)
|
||||
:param email: Email of the zone owner (only
|
||||
applies if zone_type is primary)
|
||||
:param description: Description of the zone
|
||||
:param ttl: TTL (Time to live) value in seconds
|
||||
:param masters: Master nameservers (only applies
|
||||
if zone_type is secondary)
|
||||
|
||||
:returns: a dict representing the created zone.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
# We capitalize in case the user passes the type in lowercase, as
|
||||
# designate call expects PRIMARY/SECONDARY
|
||||
if zone_type is not None:
|
||||
zone_type = zone_type.upper()
|
||||
if zone_type not in ('PRIMARY', 'SECONDARY'):
|
||||
raise exc.OpenStackCloudException(
|
||||
"Invalid type %s, valid choices are PRIMARY or SECONDARY" %
|
||||
zone_type)
|
||||
|
||||
zone = {
|
||||
"name": name,
|
||||
"email": email,
|
||||
"description": description,
|
||||
}
|
||||
if ttl is not None:
|
||||
zone["ttl"] = ttl
|
||||
|
||||
if zone_type is not None:
|
||||
zone["type"] = zone_type
|
||||
|
||||
if masters is not None:
|
||||
zone["masters"] = masters
|
||||
|
||||
data = self._dns_client.post(
|
||||
"/zones", json=zone,
|
||||
error_message="Unable to create zone {name}".format(name=name))
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
@_utils.valid_kwargs('email', 'description', 'ttl', 'masters')
|
||||
def update_zone(self, name_or_id, **kwargs):
|
||||
"""Update a zone.
|
||||
|
||||
:param name_or_id: Name or ID of the zone being updated.
|
||||
:param email: Email of the zone owner (only
|
||||
applies if zone_type is primary)
|
||||
:param description: Description of the zone
|
||||
:param ttl: TTL (Time to live) value in seconds
|
||||
:param masters: Master nameservers (only applies
|
||||
if zone_type is secondary)
|
||||
|
||||
:returns: a dict representing the updated zone.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
zone = self.get_zone(name_or_id)
|
||||
if not zone:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Zone %s not found." % name_or_id)
|
||||
|
||||
data = self._dns_client.patch(
|
||||
"/zones/{zone_id}".format(zone_id=zone['id']), json=kwargs,
|
||||
error_message="Error updating zone {0}".format(name_or_id))
|
||||
return self._get_and_munchify(key=None, data=data)
|
||||
|
||||
def delete_zone(self, name_or_id):
|
||||
"""Delete a zone.
|
||||
|
||||
:param name_or_id: Name or ID of the zone being deleted.
|
||||
|
||||
:returns: True if delete succeeded, False otherwise.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
zone = self.get_zone(name_or_id)
|
||||
if zone is None:
|
||||
self.log.debug("Zone %s not found for deleting", name_or_id)
|
||||
return False
|
||||
|
||||
self._dns_client.delete(
|
||||
"/zones/{zone_id}".format(zone_id=zone['id']),
|
||||
error_message="Error deleting zone {0}".format(name_or_id))
|
||||
|
||||
return True
|
||||
|
||||
def list_recordsets(self, zone):
|
||||
"""List all available recordsets.
|
||||
|
||||
:param zone: Name or ID of the zone managing the recordset
|
||||
|
||||
:returns: A list of recordsets.
|
||||
|
||||
"""
|
||||
zone_obj = self.get_zone(zone)
|
||||
if zone_obj is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Zone %s not found." % zone)
|
||||
return self._dns_client.get(
|
||||
"/zones/{zone_id}/recordsets".format(zone_id=zone_obj['id']),
|
||||
error_message="Error fetching recordsets list")['recordsets']
|
||||
|
||||
def get_recordset(self, zone, name_or_id):
|
||||
"""Get a recordset by name or ID.
|
||||
|
||||
:param zone: Name or ID of the zone managing the recordset
|
||||
:param name_or_id: Name or ID of the recordset
|
||||
|
||||
:returns: A recordset dict or None if no matching recordset is
|
||||
found.
|
||||
|
||||
"""
|
||||
zone_obj = self.get_zone(zone)
|
||||
if zone_obj is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Zone %s not found." % zone)
|
||||
try:
|
||||
return self._dns_client.get(
|
||||
"/zones/{zone_id}/recordsets/{recordset_id}".format(
|
||||
zone_id=zone_obj['id'], recordset_id=name_or_id),
|
||||
error_message="Error fetching recordset")
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def search_recordsets(self, zone, name_or_id=None, filters=None):
|
||||
recordsets = self.list_recordsets(zone=zone)
|
||||
return _utils._filter_list(recordsets, name_or_id, filters)
|
||||
|
||||
def create_recordset(self, zone, name, recordset_type, records,
|
||||
description=None, ttl=None):
|
||||
"""Create a recordset.
|
||||
|
||||
:param zone: Name or ID of the zone managing the recordset
|
||||
:param name: Name of the recordset
|
||||
:param recordset_type: Type of the recordset
|
||||
:param records: List of the recordset definitions
|
||||
:param description: Description of the recordset
|
||||
:param ttl: TTL value of the recordset
|
||||
|
||||
:returns: a dict representing the created recordset.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
|
||||
"""
|
||||
zone_obj = self.get_zone(zone)
|
||||
if zone_obj is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Zone %s not found." % zone)
|
||||
|
||||
# We capitalize the type in case the user sends in lowercase
|
||||
recordset_type = recordset_type.upper()
|
||||
|
||||
body = {
|
||||
'name': name,
|
||||
'type': recordset_type,
|
||||
'records': records
|
||||
}
|
||||
|
||||
if description:
|
||||
body['description'] = description
|
||||
|
||||
if ttl:
|
||||
body['ttl'] = ttl
|
||||
|
||||
return self._dns_client.post(
|
||||
"/zones/{zone_id}/recordsets".format(zone_id=zone_obj['id']),
|
||||
json=body,
|
||||
error_message="Error creating recordset {name}".format(name=name))
|
||||
|
||||
@_utils.valid_kwargs('description', 'ttl', 'records')
|
||||
def update_recordset(self, zone, name_or_id, **kwargs):
|
||||
"""Update a recordset.
|
||||
|
||||
:param zone: Name or ID of the zone managing the recordset
|
||||
:param name_or_id: Name or ID of the recordset being updated.
|
||||
:param records: List of the recordset definitions
|
||||
:param description: Description of the recordset
|
||||
:param ttl: TTL (Time to live) value in seconds of the recordset
|
||||
|
||||
:returns: a dict representing the updated recordset.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
zone_obj = self.get_zone(zone)
|
||||
if zone_obj is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Zone %s not found." % zone)
|
||||
|
||||
recordset_obj = self.get_recordset(zone, name_or_id)
|
||||
if recordset_obj is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Recordset %s not found." % name_or_id)
|
||||
|
||||
new_recordset = self._dns_client.put(
|
||||
"/zones/{zone_id}/recordsets/{recordset_id}".format(
|
||||
zone_id=zone_obj['id'], recordset_id=name_or_id), json=kwargs,
|
||||
error_message="Error updating recordset {0}".format(name_or_id))
|
||||
|
||||
return new_recordset
|
||||
|
||||
def delete_recordset(self, zone, name_or_id):
|
||||
"""Delete a recordset.
|
||||
|
||||
:param zone: Name or ID of the zone managing the recordset.
|
||||
:param name_or_id: Name or ID of the recordset being deleted.
|
||||
|
||||
:returns: True if delete succeeded, False otherwise.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
|
||||
zone_obj = self.get_zone(zone)
|
||||
if zone_obj is None:
|
||||
self.log.debug("Zone %s not found for deleting", zone)
|
||||
return False
|
||||
|
||||
recordset = self.get_recordset(zone_obj['id'], name_or_id)
|
||||
if recordset is None:
|
||||
self.log.debug("Recordset %s not found for deleting", name_or_id)
|
||||
return False
|
||||
|
||||
self._dns_client.delete(
|
||||
"/zones/{zone_id}/recordsets/{recordset_id}".format(
|
||||
zone_id=zone_obj['id'], recordset_id=name_or_id),
|
||||
error_message="Error deleting recordset {0}".format(name_or_id))
|
||||
|
||||
return True
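# Illustrative usage sketch for the DNS (Designate) calls above. It assumes a
# clouds.yaml entry named 'mycloud' with the DNS service enabled; the zone
# name and addresses are placeholders.
import openstack

conn = openstack.connect(cloud='mycloud')

zone = conn.create_zone(
    name='example.org.', zone_type='primary',
    email='hostmaster@example.org', ttl=3600)

rs = conn.create_recordset(
    zone=zone['id'], name='www.example.org.', recordset_type='A',
    records=['203.0.113.10'], ttl=300)

# Point the record at a new address, then clean everything up again.
conn.update_recordset(zone['id'], rs['id'], records=['203.0.113.20'])
conn.delete_recordset(zone['id'], rs['id'])
conn.delete_zone(zone['id'])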
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,432 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# import types so that we can reference ListType in sphinx param declarations.
|
||||
# We can't just use list, because sphinx gets confused by
|
||||
# openstack.resource.Resource.list and openstack.resource2.Resource.list
|
||||
import types # noqa
|
||||
|
||||
import keystoneauth1.exceptions
|
||||
|
||||
from openstack.cloud import exc
|
||||
from openstack.cloud import meta
|
||||
from openstack.cloud import _normalize
|
||||
from openstack.cloud import _utils
|
||||
from openstack import proxy
|
||||
from openstack import utils
|
||||
|
||||
|
||||
def _no_pending_images(images):
|
||||
"""If there are any images not in a steady state, don't cache"""
|
||||
for image in images:
|
||||
if image.status not in ('active', 'deleted', 'killed'):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class ImageCloudMixin(_normalize.Normalizer):
|
||||
|
||||
def __init__(self):
|
||||
self.image_api_use_tasks = self.config.config['image_api_use_tasks']
|
||||
|
||||
@property
|
||||
def _raw_image_client(self):
|
||||
if 'raw-image' not in self._raw_clients:
|
||||
image_client = self._get_raw_client('image')
|
||||
self._raw_clients['raw-image'] = image_client
|
||||
return self._raw_clients['raw-image']
|
||||
|
||||
@property
|
||||
def _image_client(self):
|
||||
if 'image' not in self._raw_clients:
|
||||
self._raw_clients['image'] = self._get_versioned_client(
|
||||
'image', min_version=1, max_version='2.latest')
|
||||
return self._raw_clients['image']
|
||||
|
||||
def search_images(self, name_or_id=None, filters=None):
|
||||
images = self.list_images()
|
||||
return _utils._filter_list(images, name_or_id, filters)
|
||||
|
||||
@_utils.cache_on_arguments(should_cache_fn=_no_pending_images)
|
||||
def list_images(self, filter_deleted=True, show_all=False):
|
||||
"""Get available images.
|
||||
|
||||
:param filter_deleted: Control whether deleted images are returned.
|
||||
:param show_all: Show all images, including images that are shared
|
||||
but not accepted. (By default in glance v2 shared images that
|
||||
have not been accepted are not shown) show_all will override the
|
||||
value of filter_deleted to False.
|
||||
:returns: A list of glance images.
|
||||
"""
|
||||
if show_all:
|
||||
filter_deleted = False
|
||||
# First, try to actually get images from glance, it's more efficient
|
||||
images = []
|
||||
params = {}
|
||||
image_list = []
|
||||
try:
|
||||
if self._is_client_version('image', 2):
|
||||
endpoint = '/images'
|
||||
if show_all:
|
||||
params['member_status'] = 'all'
|
||||
else:
|
||||
endpoint = '/images/detail'
|
||||
|
||||
response = self._image_client.get(endpoint, params=params)
|
||||
|
||||
except keystoneauth1.exceptions.catalog.EndpointNotFound:
|
||||
# We didn't have glance, let's try nova
|
||||
# If this doesn't work - we just let the exception propagate
|
||||
response = proxy._json_response(
|
||||
self.compute.get('/images/detail'))
|
||||
while 'next' in response:
|
||||
image_list.extend(meta.obj_list_to_munch(response['images']))
|
||||
endpoint = response['next']
|
||||
# next links from glance have the version prefix. If the catalog
|
||||
# has a versioned endpoint, then we can't append the next link to
|
||||
# it. Strip the absolute prefix (/v1/ or /v2/ to turn it into
|
||||
# a proper relative link.
|
||||
if endpoint.startswith('/v'):
|
||||
endpoint = endpoint[4:]
|
||||
response = self._image_client.get(endpoint)
|
||||
if 'images' in response:
|
||||
image_list.extend(meta.obj_list_to_munch(response['images']))
|
||||
else:
|
||||
image_list.extend(response)
|
||||
|
||||
for image in image_list:
|
||||
# The cloud might return DELETED for invalid images.
|
||||
# While that's cute and all, that's an implementation detail.
|
||||
if not filter_deleted:
|
||||
images.append(image)
|
||||
elif image.status.lower() != 'deleted':
|
||||
images.append(image)
|
||||
return self._normalize_images(images)
|
||||
|
||||
def get_image(self, name_or_id, filters=None):
|
||||
"""Get an image by name or ID.
|
||||
|
||||
:param name_or_id: Name or ID of the image.
|
||||
:param filters:
|
||||
A dictionary of meta data to use for further filtering. Elements
|
||||
of this dictionary may, themselves, be dictionaries. Example::
|
||||
|
||||
{
|
||||
'last_name': 'Smith',
|
||||
'other': {
|
||||
'gender': 'Female'
|
||||
}
|
||||
}
|
||||
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
|
||||
|
||||
:returns: An image ``munch.Munch`` or None if no matching image
|
||||
is found
|
||||
|
||||
"""
|
||||
return _utils._get_entity(self, 'image', name_or_id, filters)
|
||||
|
||||
def get_image_by_id(self, id):
|
||||
""" Get a image by ID
|
||||
|
||||
:param id: ID of the image.
|
||||
:returns: An image ``munch.Munch``.
|
||||
"""
|
||||
data = self._image_client.get(
|
||||
'/images/{id}'.format(id=id),
|
||||
error_message="Error getting image with ID {id}".format(id=id)
|
||||
)
|
||||
key = 'image' if 'image' in data else None
|
||||
image = self._normalize_image(
|
||||
self._get_and_munchify(key, data))
|
||||
|
||||
return image
|
||||
|
||||
def download_image(
|
||||
self, name_or_id, output_path=None, output_file=None,
|
||||
chunk_size=1024):
|
||||
"""Download an image by name or ID
|
||||
|
||||
:param str name_or_id: Name or ID of the image.
|
||||
:param output_path: the output path to write the image to. Either this
|
||||
or output_file must be specified
|
||||
:param output_file: a file object (or file-like object) to write the
|
||||
image data to. Only write() will be called on this object. Either
|
||||
this or output_path must be specified
|
||||
:param int chunk_size: size in bytes to read from the wire and buffer
|
||||
at one time. Defaults to 1024
|
||||
|
||||
:raises: OpenStackCloudException in the event download_image is called
|
||||
without exactly one of either output_path or output_file
|
||||
:raises: OpenStackCloudResourceNotFound if no images are found matching
|
||||
the name or ID provided
|
||||
"""
|
||||
if output_path is None and output_file is None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'No output specified, an output path or file object'
|
||||
' is necessary to write the image data to')
|
||||
elif output_path is not None and output_file is not None:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Both an output path and file object were provided,'
|
||||
' however only one can be used at once')
|
||||
|
||||
image = self.search_images(name_or_id)
|
||||
if len(image) == 0:
|
||||
raise exc.OpenStackCloudResourceNotFound(
|
||||
"No images with name or ID %s were found" % name_or_id, None)
|
||||
if self._is_client_version('image', 2):
|
||||
endpoint = '/images/{id}/file'.format(id=image[0]['id'])
|
||||
else:
|
||||
endpoint = '/images/{id}'.format(id=image[0]['id'])
|
||||
|
||||
response = self._image_client.get(endpoint, stream=True)
|
||||
|
||||
with _utils.shade_exceptions("Unable to download image"):
|
||||
if output_path:
|
||||
with open(output_path, 'wb') as fd:
|
||||
for chunk in response.iter_content(chunk_size=chunk_size):
|
||||
fd.write(chunk)
|
||||
return
|
||||
elif output_file:
|
||||
for chunk in response.iter_content(chunk_size=chunk_size):
|
||||
output_file.write(chunk)
|
||||
return
|
||||
|
||||
def get_image_exclude(self, name_or_id, exclude):
|
||||
for image in self.search_images(name_or_id):
|
||||
if exclude:
|
||||
if exclude not in image.name:
|
||||
return image
|
||||
else:
|
||||
return image
|
||||
return None
|
||||
|
||||
def get_image_name(self, image_id, exclude=None):
|
||||
image = self.get_image_exclude(image_id, exclude)
|
||||
if image:
|
||||
return image.name
|
||||
return None
|
||||
|
||||
def get_image_id(self, image_name, exclude=None):
|
||||
image = self.get_image_exclude(image_name, exclude)
|
||||
if image:
|
||||
return image.id
|
||||
return None
|
||||
|
||||
def wait_for_image(self, image, timeout=3600):
|
||||
image_id = image['id']
|
||||
for count in utils.iterate_timeout(
|
||||
timeout, "Timeout waiting for image to snapshot"):
|
||||
self.list_images.invalidate(self)
|
||||
image = self.get_image(image_id)
|
||||
if not image:
|
||||
continue
|
||||
if image['status'] == 'active':
|
||||
return image
|
||||
elif image['status'] == 'error':
|
||||
raise exc.OpenStackCloudException(
|
||||
'Image {image} hit error state'.format(image=image_id))
|
||||
|
||||
def delete_image(
|
||||
self, name_or_id, wait=False, timeout=3600, delete_objects=True):
|
||||
"""Delete an existing image.
|
||||
|
||||
:param name_or_id: Name of the image to be deleted.
|
||||
:param wait: If True, waits for image to be deleted.
|
||||
:param timeout: Seconds to wait for image deletion. None is forever.
|
||||
:param delete_objects: If True, also deletes uploaded swift objects.
|
||||
|
||||
:returns: True if delete succeeded, False otherwise.
|
||||
|
||||
:raises: OpenStackCloudException if there are problems deleting.
|
||||
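Example (a minimal sketch; the cloud and image names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    # Block until the image is gone or the timeout expires.
    deleted = cloud.delete_image('old-image', wait=True, timeout=600)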
"""
|
||||
image = self.get_image(name_or_id)
|
||||
if not image:
|
||||
return False
|
||||
self._image_client.delete(
|
||||
'/images/{id}'.format(id=image.id),
|
||||
error_message="Error in deleting image")
|
||||
self.list_images.invalidate(self)
|
||||
|
||||
# Task API means an image was uploaded to swift
|
||||
if self.image_api_use_tasks and (
|
||||
self._IMAGE_OBJECT_KEY in image
|
||||
or self._SHADE_IMAGE_OBJECT_KEY in image):
|
||||
(container, objname) = image.get(
|
||||
self._IMAGE_OBJECT_KEY, image.get(
|
||||
self._SHADE_IMAGE_OBJECT_KEY)).split('/', 1)
|
||||
self.delete_object(container=container, name=objname)
|
||||
|
||||
if wait:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the image to be deleted."):
|
||||
self._get_cache(None).invalidate()
|
||||
if self.get_image(image.id) is None:
|
||||
break
|
||||
return True
|
||||
|
||||
def create_image(
|
||||
self, name, filename=None,
|
||||
container=None,
|
||||
md5=None, sha256=None,
|
||||
disk_format=None, container_format=None,
|
||||
disable_vendor_agent=True,
|
||||
wait=False, timeout=3600,
|
||||
allow_duplicates=False, meta=None, volume=None, **kwargs):
|
||||
"""Upload an image.
|
||||
|
||||
:param str name: Name of the image to create. If it is a pathname
|
||||
of an image, the name will be constructed from the
|
||||
extensionless basename of the path.
|
||||
:param str filename: The path to the file to upload, if needed.
|
||||
(optional, defaults to None)
|
||||
:param str container: Name of the container in swift where images
|
||||
should be uploaded for import if the cloud
|
||||
requires such a thing. (optional, defaults to
|
||||
'images')
|
||||
:param str md5: md5 sum of the image file. If not given, an md5 will
|
||||
be calculated.
|
||||
:param str sha256: sha256 sum of the image file. If not given, a sha256
|
||||
will be calculated.
|
||||
:param str disk_format: The disk format the image is in. (optional,
|
||||
defaults to the os-client-config config value
|
||||
for this cloud)
|
||||
:param str container_format: The container format the image is in.
|
||||
(optional, defaults to the
|
||||
os-client-config config value for this
|
||||
cloud)
|
||||
:param bool disable_vendor_agent: Whether or not to append metadata
|
||||
flags to the image to inform the
|
||||
cloud in question to not expect a
|
||||
vendor agent to be running.
|
||||
(optional, defaults to True)
|
||||
:param bool wait: If true, waits for image to be created. Defaults to
|
||||
false - however, be aware that one of the upload
|
||||
methods is always synchronous.
|
||||
:param timeout: Seconds to wait for image creation. None is forever.
|
||||
:param allow_duplicates: If true, skips checks that enforce unique
|
||||
image name. (optional, defaults to False)
|
||||
:param meta: A dict of key/value pairs to use for metadata that
|
||||
bypasses automatic type conversion.
|
||||
:param volume: Name or ID or volume object of a volume to create an
|
||||
image from. Mutually exclusive with filename. (optional, defaults
|
||||
to None)
|
||||
|
||||
Additional kwargs will be passed to the image creation as additional
|
||||
metadata for the image and will have all values converted to string
|
||||
except for min_disk, min_ram, size and virtual_size which will be
|
||||
converted to int.
|
||||
|
||||
If you are sure you have all of your data types correct or have an
|
||||
advanced need to be explicit, use meta. If you are just a normal
|
||||
consumer, using kwargs is likely the right choice.
|
||||
|
||||
If a value is in meta and kwargs, meta wins.
|
||||
|
||||
:returns: A ``munch.Munch`` of the Image object
|
||||
|
||||
:raises: OpenStackCloudException if there are problems uploading
|
||||
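Example (a minimal sketch; the cloud name, file path and extra
properties are illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    image = cloud.create_image(
        'my-image', filename='/tmp/my-image.qcow2',
        disk_format='qcow2', container_format='bare',
        wait=True, min_disk=10, min_ram=512)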
"""
|
||||
if volume:
|
||||
image = self.block_storage.create_image(
|
||||
name=name, volume=volume,
|
||||
allow_duplicates=allow_duplicates,
|
||||
container_format=container_format, disk_format=disk_format,
|
||||
wait=wait, timeout=timeout)
|
||||
else:
|
||||
image = self.image.create_image(
|
||||
name, filename=filename,
|
||||
container=container,
|
||||
md5=md5, sha256=sha256,
|
||||
disk_format=disk_format, container_format=container_format,
|
||||
disable_vendor_agent=disable_vendor_agent,
|
||||
wait=wait, timeout=timeout,
|
||||
allow_duplicates=allow_duplicates, meta=meta, **kwargs)
|
||||
|
||||
self._get_cache(None).invalidate()
|
||||
if not wait:
|
||||
return image
|
||||
try:
|
||||
for count in utils.iterate_timeout(
|
||||
timeout,
|
||||
"Timeout waiting for the image to finish."):
|
||||
image_obj = self.get_image(image.id)
|
||||
if image_obj and image_obj.status not in ('queued', 'saving'):
|
||||
return image_obj
|
||||
except exc.OpenStackCloudTimeout:
|
||||
self.log.debug(
|
||||
"Timeout waiting for image to become ready. Deleting.")
|
||||
self.delete_image(image.id, wait=True)
|
||||
raise
|
||||
|
||||
def update_image_properties(
|
||||
self, image=None, name_or_id=None, meta=None, **properties):
|
||||
image = image or name_or_id
|
||||
return self.image.update_image_properties(
|
||||
image=image, meta=meta, **properties)
|
||||
|
||||
def set_volume_quotas(self, name_or_id, **kwargs):
|
||||
""" Set a volume quota in a project
|
||||
|
||||
:param name_or_id: project name or id
|
||||
:param kwargs: key/value pairs of quota name and quota value
|
||||
|
||||
:raises: OpenStackCloudException if the resource to set the
|
||||
quota does not exist.
|
||||
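Example (a minimal sketch; the project name and the quota keys shown
are illustrative -- valid keys depend on the cinder deployment)::

    import openstack

    cloud = openstack.connect(cloud='example')
    # Allow the project 100 volumes and 1000 GiB of block storage.
    cloud.set_volume_quotas('my-project', volumes=100, gigabytes=1000)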
"""
|
||||
|
||||
proj = self.get_project(name_or_id)
|
||||
if not proj:
|
||||
raise exc.OpenStackCloudException("project does not exist")
|
||||
|
||||
kwargs['tenant_id'] = proj.id
|
||||
self._volume_client.put(
|
||||
'/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
|
||||
json={'quota_set': kwargs},
|
||||
error_message="No valid quota or resource")
|
||||
|
||||
def get_volume_quotas(self, name_or_id):
|
||||
""" Get volume quotas for a project
|
||||
|
||||
:param name_or_id: project name or id
|
||||
:raises: OpenStackCloudException if it's not a valid project
|
||||
|
||||
:returns: Munch object with the quotas
|
||||
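Example (a minimal sketch; the cloud and project names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    quotas = cloud.get_volume_quotas('my-project')
    print(quotas.volumes, quotas.gigabytes)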
"""
|
||||
proj = self.get_project(name_or_id)
|
||||
if not proj:
|
||||
raise exc.OpenStackCloudException("project does not exist")
|
||||
|
||||
data = self._volume_client.get(
|
||||
'/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
|
||||
error_message="cinder client call failed")
|
||||
return self._get_and_munchify('quota_set', data)
|
||||
|
||||
def delete_volume_quotas(self, name_or_id):
|
||||
""" Delete volume quotas for a project
|
||||
|
||||
:param name_or_id: project name or id
|
||||
:raises: OpenStackCloudException if it's not a valid project or the
|
||||
cinder client call failed
|
||||
|
||||
:returns: dict with the quotas
|
||||
"""
|
||||
proj = self.get_project(name_or_id)
|
||||
if not proj:
|
||||
raise exc.OpenStackCloudException("project does not exist")
|
||||
|
||||
return self._volume_client.delete(
|
||||
'/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
|
||||
error_message="cinder client call failed")
|
File diff suppressed because it is too large
|
@ -0,0 +1,370 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# import types so that we can reference ListType in sphinx param declarations.
|
||||
# We can't just use list, because sphinx gets confused by
|
||||
# openstack.resource.Resource.list and openstack.resource2.Resource.list
|
||||
import threading
|
||||
import types # noqa
|
||||
|
||||
from openstack.cloud import exc
|
||||
from openstack.cloud import _normalize
|
||||
|
||||
|
||||
class NetworkCommonCloudMixin(_normalize.Normalizer):
|
||||
"""Shared networking functions used by FloatingIP, Network, Compute classes
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._external_ipv4_names = self.config.get_external_ipv4_networks()
|
||||
self._internal_ipv4_names = self.config.get_internal_ipv4_networks()
|
||||
self._external_ipv6_names = self.config.get_external_ipv6_networks()
|
||||
self._internal_ipv6_names = self.config.get_internal_ipv6_networks()
|
||||
self._nat_destination = self.config.get_nat_destination()
|
||||
self._nat_source = self.config.get_nat_source()
|
||||
self._default_network = self.config.get_default_network()
|
||||
|
||||
self._use_external_network = self.config.config.get(
|
||||
'use_external_network', True)
|
||||
self._use_internal_network = self.config.config.get(
|
||||
'use_internal_network', True)
|
||||
|
||||
self._networks_lock = threading.Lock()
|
||||
self._reset_network_caches()
|
||||
|
||||
def use_external_network(self):
|
||||
return self._use_external_network
|
||||
|
||||
def use_internal_network(self):
|
||||
return self._use_internal_network
|
||||
|
||||
def _reset_network_caches(self):
|
||||
# Variables to prevent us from going through the network finding
|
||||
# logic again if we've done it once. This is different from just
|
||||
# the cached value, since "None" is a valid value to find.
|
||||
with self._networks_lock:
|
||||
self._external_ipv4_networks = []
|
||||
self._external_ipv4_floating_networks = []
|
||||
self._internal_ipv4_networks = []
|
||||
self._external_ipv6_networks = []
|
||||
self._internal_ipv6_networks = []
|
||||
self._nat_destination_network = None
|
||||
self._nat_source_network = None
|
||||
self._default_network_network = None
|
||||
self._network_list_stamp = False
|
||||
|
||||
def _set_interesting_networks(self):
|
||||
external_ipv4_networks = []
|
||||
external_ipv4_floating_networks = []
|
||||
internal_ipv4_networks = []
|
||||
external_ipv6_networks = []
|
||||
internal_ipv6_networks = []
|
||||
nat_destination = None
|
||||
nat_source = None
|
||||
default_network = None
|
||||
|
||||
all_subnets = None
|
||||
|
||||
# Filter locally because we have an or condition
|
||||
try:
|
||||
# TODO(mordred): Rackspace exposes neutron but it does not
|
||||
# work. I think that overriding what the service catalog
|
||||
# reports should be a thing os-client-config should handle
|
||||
# in a vendor profile - but for now it does not. That means
|
||||
# this search_networks can just totally fail. If it does
|
||||
# though, that's fine, clearly the neutron introspection is
|
||||
# not going to work.
|
||||
all_networks = self.list_networks()
|
||||
except exc.OpenStackCloudException:
|
||||
self._network_list_stamp = True
|
||||
return
|
||||
|
||||
for network in all_networks:
|
||||
|
||||
# External IPv4 networks
|
||||
if (network['name'] in self._external_ipv4_names
|
||||
or network['id'] in self._external_ipv4_names):
|
||||
external_ipv4_networks.append(network)
|
||||
elif ((('router:external' in network
|
||||
and network['router:external'])
|
||||
or network.get('provider:physical_network'))
|
||||
and network['name'] not in self._internal_ipv4_names
|
||||
and network['id'] not in self._internal_ipv4_names):
|
||||
external_ipv4_networks.append(network)
|
||||
|
||||
# Internal IPv4 networks
|
||||
if (network['name'] in self._internal_ipv4_names
|
||||
or network['id'] in self._internal_ipv4_names):
|
||||
internal_ipv4_networks.append(network)
|
||||
elif (not network.get('router:external', False)
|
||||
and not network.get('provider:physical_network')
|
||||
and network['name'] not in self._external_ipv4_names
|
||||
and network['id'] not in self._external_ipv4_names):
|
||||
internal_ipv4_networks.append(network)
|
||||
|
||||
# External IPv6 networks
|
||||
if (network['name'] in self._external_ipv6_names
|
||||
or network['id'] in self._external_ipv6_names):
|
||||
external_ipv6_networks.append(network)
|
||||
elif (network.get('router:external')
|
||||
and network['name'] not in self._internal_ipv6_names
|
||||
and network['id'] not in self._internal_ipv6_names):
|
||||
external_ipv6_networks.append(network)
|
||||
|
||||
# Internal IPv6 networks
|
||||
if (network['name'] in self._internal_ipv6_names
|
||||
or network['id'] in self._internal_ipv6_names):
|
||||
internal_ipv6_networks.append(network)
|
||||
elif (not network.get('router:external', False)
|
||||
and network['name'] not in self._external_ipv6_names
|
||||
and network['id'] not in self._external_ipv6_names):
|
||||
internal_ipv6_networks.append(network)
|
||||
|
||||
# External Floating IPv4 networks
|
||||
if self._nat_source in (
|
||||
network['name'], network['id']):
|
||||
if nat_source:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Multiple networks were found matching'
|
||||
' {nat_net} which is the network configured'
|
||||
' to be the NAT source. Please check your'
|
||||
' cloud resources. It is probably a good idea'
|
||||
' to configure this network by ID rather than'
|
||||
' by name.'.format(
|
||||
nat_net=self._nat_source))
|
||||
external_ipv4_floating_networks.append(network)
|
||||
nat_source = network
|
||||
elif self._nat_source is None:
|
||||
if network.get('router:external'):
|
||||
external_ipv4_floating_networks.append(network)
|
||||
nat_source = nat_source or network
|
||||
|
||||
# NAT Destination
|
||||
if self._nat_destination in (
|
||||
network['name'], network['id']):
|
||||
if nat_destination:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Multiple networks were found matching'
|
||||
' {nat_net} which is the network configured'
|
||||
' to be the NAT destination. Please check your'
|
||||
' cloud resources. It is probably a good idea'
|
||||
' to configure this network by ID rather than'
|
||||
' by name.'.format(
|
||||
nat_net=self._nat_destination))
|
||||
nat_destination = network
|
||||
elif self._nat_destination is None:
|
||||
# TODO(mordred) need a config value for floating
|
||||
# ips for this cloud so that we can skip this
|
||||
# No configured nat destination, we have to figure
|
||||
# it out.
|
||||
if all_subnets is None:
|
||||
try:
|
||||
all_subnets = self.list_subnets()
|
||||
except exc.OpenStackCloudException:
|
||||
# Thanks Rackspace broken neutron
|
||||
all_subnets = []
|
||||
|
||||
for subnet in all_subnets:
|
||||
# TODO(mordred) trap for detecting more than
|
||||
# one network with a gateway_ip without a config
|
||||
if ('gateway_ip' in subnet and subnet['gateway_ip']
|
||||
and network['id'] == subnet['network_id']):
|
||||
nat_destination = network
|
||||
break
|
||||
|
||||
# Default network
|
||||
if self._default_network in (
|
||||
network['name'], network['id']):
|
||||
if default_network:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Multiple networks were found matching'
|
||||
' {default_net} which is the network'
|
||||
' configured to be the default interface'
|
||||
' network. Please check your cloud resources.'
|
||||
' It is probably a good idea'
|
||||
' to configure this network by ID rather than'
|
||||
' by name.'.format(
|
||||
default_net=self._default_network))
|
||||
default_network = network
|
||||
|
||||
# Validate config vs. reality
|
||||
for net_name in self._external_ipv4_names:
|
||||
if net_name not in [net['name'] for net in external_ipv4_networks]:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Networks: {network} was provided for external IPv4"
|
||||
" access and those networks could not be found".format(
|
||||
network=net_name))
|
||||
|
||||
for net_name in self._internal_ipv4_names:
|
||||
if net_name not in [net['name'] for net in internal_ipv4_networks]:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Networks: {network} was provided for internal IPv4"
|
||||
" access and those networks could not be found".format(
|
||||
network=net_name))
|
||||
|
||||
for net_name in self._external_ipv6_names:
|
||||
if net_name not in [net['name'] for net in external_ipv6_networks]:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Networks: {network} was provided for external IPv6"
|
||||
" access and those networks could not be found".format(
|
||||
network=net_name))
|
||||
|
||||
for net_name in self._internal_ipv6_names:
|
||||
if net_name not in [net['name'] for net in internal_ipv6_networks]:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Networks: {network} was provided for internal IPv6"
|
||||
" access and those networks could not be found".format(
|
||||
network=net_name))
|
||||
|
||||
if self._nat_destination and not nat_destination:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Network {network} was configured to be the'
|
||||
' destination for inbound NAT but it could not be'
|
||||
' found'.format(
|
||||
network=self._nat_destination))
|
||||
|
||||
if self._nat_source and not nat_source:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Network {network} was configured to be the'
|
||||
' source for inbound NAT but it could not be'
|
||||
' found'.format(
|
||||
network=self._nat_source))
|
||||
|
||||
if self._default_network and not default_network:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Network {network} was configured to be the'
|
||||
' default network interface but it could not be'
|
||||
' found'.format(
|
||||
network=self._default_network))
|
||||
|
||||
self._external_ipv4_networks = external_ipv4_networks
|
||||
self._external_ipv4_floating_networks = external_ipv4_floating_networks
|
||||
self._internal_ipv4_networks = internal_ipv4_networks
|
||||
self._external_ipv6_networks = external_ipv6_networks
|
||||
self._internal_ipv6_networks = internal_ipv6_networks
|
||||
self._nat_destination_network = nat_destination
|
||||
self._nat_source_network = nat_source
|
||||
self._default_network_network = default_network
|
||||
|
||||
def _find_interesting_networks(self):
|
||||
if self._networks_lock.acquire():
|
||||
try:
|
||||
if self._network_list_stamp:
|
||||
return
|
||||
if (not self._use_external_network
|
||||
and not self._use_internal_network):
|
||||
# Both have been flagged as skip - don't do a list
|
||||
return
|
||||
if not self.has_service('network'):
|
||||
return
|
||||
self._set_interesting_networks()
|
||||
self._network_list_stamp = True
|
||||
finally:
|
||||
self._networks_lock.release()
|
||||
|
||||
|
||||
def get_nat_source(self):
|
||||
"""Return the network that is configured to be the NAT destination.
|
||||
|
||||
:returns: A network dict if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._nat_source_network
|
||||
|
||||
def get_default_network(self):
|
||||
"""Return the network that is configured to be the default interface.
|
||||
|
||||
:returns: A network dict if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._default_network_network
|
||||
|
||||
def get_nat_destination(self):
|
||||
"""Return the network that is configured to be the NAT destination.
|
||||
|
||||
:returns: A network dict if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._nat_destination_network
|
||||
|
||||
def get_external_networks(self):
|
||||
"""Return the networks that are configured to route northbound.
|
||||
|
||||
This should be avoided in favor of the specific ipv4/ipv6 method,
|
||||
but is here for backwards compatibility.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
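Example (a minimal sketch; the cloud name is illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    for net in cloud.get_external_networks():
        print(net['id'], net['name'])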
"""
|
||||
self._find_interesting_networks()
|
||||
return list(
|
||||
set(self._external_ipv4_networks)
|
||||
| set(self._external_ipv6_networks))
|
||||
|
||||
def get_internal_networks(self):
|
||||
"""Return the networks that are configured to not route northbound.
|
||||
|
||||
This should be avoided in favor of the specific ipv4/ipv6 method,
|
||||
but is here for backwards compatibility.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return list(
|
||||
set(self._internal_ipv4_networks)
|
||||
| set(self._internal_ipv6_networks))
|
||||
|
||||
def get_external_ipv4_networks(self):
|
||||
"""Return the networks that are configured to route northbound.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._external_ipv4_networks
|
||||
|
||||
def get_external_ipv4_floating_networks(self):
|
||||
"""Return the networks that are configured to route northbound.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._external_ipv4_floating_networks
|
||||
|
||||
def get_internal_ipv4_networks(self):
|
||||
"""Return the networks that are configured to not route northbound.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._internal_ipv4_networks
|
||||
|
||||
def get_external_ipv6_networks(self):
|
||||
"""Return the networks that are configured to route northbound.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._external_ipv6_networks
|
||||
|
||||
def get_internal_ipv6_networks(self):
|
||||
"""Return the networks that are configured to not route northbound.
|
||||
|
||||
:returns: A list of network ``munch.Munch`` if one is found
|
||||
"""
|
||||
self._find_interesting_networks()
|
||||
return self._internal_ipv6_networks
|
|
@ -0,0 +1,837 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# import types so that we can reference ListType in sphinx param declarations.
|
||||
# We can't just use list, because sphinx gets confused by
|
||||
# openstack.resource.Resource.list and openstack.resource2.Resource.list
|
||||
import collections
|
||||
import concurrent.futures
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import six
|
||||
import types # noqa
|
||||
|
||||
from six.moves import urllib
|
||||
|
||||
import keystoneauth1.exceptions
|
||||
|
||||
from openstack.cloud import exc
|
||||
from openstack.cloud import _normalize
|
||||
from openstack.cloud import _utils
|
||||
from openstack import exceptions
|
||||
from openstack import proxy
|
||||
|
||||
|
||||
DEFAULT_OBJECT_SEGMENT_SIZE = 1073741824 # 1GB
|
||||
# This halves the current default for Swift
|
||||
DEFAULT_MAX_FILE_SIZE = (5 * 1024 * 1024 * 1024 + 2) // 2
|
||||
|
||||
|
||||
OBJECT_CONTAINER_ACLS = {
|
||||
'public': '.r:*,.rlistings',
|
||||
'private': '',
|
||||
}
|
||||
|
||||
|
||||
class ObjectStoreCloudMixin(_normalize.Normalizer):
|
||||
|
||||
def __init__(self):
|
||||
self.__pool_executor = None
|
||||
|
||||
@property
|
||||
def _object_store_client(self):
|
||||
if 'object-store' not in self._raw_clients:
|
||||
raw_client = self._get_raw_client('object-store')
|
||||
self._raw_clients['object-store'] = raw_client
|
||||
return self._raw_clients['object-store']
|
||||
|
||||
@property
|
||||
def _pool_executor(self):
|
||||
if not self.__pool_executor:
|
||||
# TODO(mordred) Make this configurable - and probably use Futurist
|
||||
# instead of concurrent.futures so that people using Eventlet will
|
||||
# be happier.
|
||||
self.__pool_executor = concurrent.futures.ThreadPoolExecutor(
|
||||
max_workers=5)
|
||||
return self.__pool_executor
|
||||
|
||||
def list_containers(self, full_listing=True, prefix=None):
|
||||
"""List containers.
|
||||
|
||||
:param full_listing: Ignored. Present for backwards compat
:param string prefix: Only containers with this prefix will be returned. (optional)
|
||||
|
||||
:returns: list of Munch of the container objects
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
params = dict(format='json', prefix=prefix)
|
||||
response = self.object_store.get('/', params=params)
|
||||
return self._get_and_munchify(None, proxy._json_response(response))
|
||||
|
||||
def search_containers(self, name=None, filters=None):
|
||||
"""Search containers.
|
||||
|
||||
:param string name: container name.
|
||||
:param filters: a dict containing additional filters to use.
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
|
||||
|
||||
:returns: a list of ``munch.Munch`` containing the containers.
|
||||
|
||||
:raises: ``OpenStackCloudException``: if something goes wrong during
|
||||
the OpenStack API call.
|
||||
"""
|
||||
containers = self.list_containers()
|
||||
return _utils._filter_list(containers, name, filters)
|
||||
|
||||
def get_container(self, name, skip_cache=False):
|
||||
"""Get metadata about a container.
|
||||
|
||||
:param str name:
|
||||
Name of the container to get metadata for.
|
||||
:param bool skip_cache:
|
||||
Ignore the cache of container metadata for this container.
|
||||
Defaults to ``False``.
|
||||
"""
|
||||
if skip_cache or name not in self._container_cache:
|
||||
try:
|
||||
response = self.object_store.head(name)
|
||||
exceptions.raise_from_response(response)
|
||||
self._container_cache[name] = response.headers
|
||||
except exc.OpenStackCloudHTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
return None
|
||||
raise
|
||||
return self._container_cache[name]
|
||||
|
||||
def create_container(self, name, public=False):
|
||||
"""Create an object-store container.
|
||||
|
||||
:param str name:
|
||||
Name of the container to create.
|
||||
:param bool public:
|
||||
Whether to set this container to be public. Defaults to ``False``.
|
||||
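Example (a minimal sketch; the cloud and container names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    # Returns the existing container if it is already there.
    container = cloud.create_container('backups', public=True)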
"""
|
||||
container = self.get_container(name)
|
||||
if container:
|
||||
return container
|
||||
exceptions.raise_from_response(self.object_store.put(name))
|
||||
if public:
|
||||
self.set_container_access(name, 'public')
|
||||
return self.get_container(name, skip_cache=True)
|
||||
|
||||
def delete_container(self, name):
|
||||
"""Delete an object-store container.
|
||||
|
||||
:param str name: Name of the container to delete.
|
||||
"""
|
||||
try:
|
||||
exceptions.raise_from_response(self.object_store.delete(name))
|
||||
self._container_cache.pop(name, None)
|
||||
return True
|
||||
except exc.OpenStackCloudHTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
return False
|
||||
if e.response.status_code == 409:
|
||||
raise exc.OpenStackCloudException(
|
||||
'Attempt to delete container {container} failed. The'
|
||||
' container is not empty. Please delete the objects'
|
||||
' inside it before deleting the container'.format(
|
||||
container=name))
|
||||
raise
|
||||
|
||||
def update_container(self, name, headers):
|
||||
"""Update the metadata in a container.
|
||||
|
||||
:param str name:
|
||||
Name of the container to update.
|
||||
:param dict headers:
|
||||
Key/Value headers to set on the container.
|
||||
"""
|
||||
"""Update the metadata in a container.
|
||||
|
||||
:param str name:
|
||||
Name of the container to update.
|
||||
:param dict headers:
|
||||
Key/Value headers to set on the container.
|
||||
"""
|
||||
exceptions.raise_from_response(
|
||||
self.object_store.post(name, headers=headers))
|
||||
|
||||
def set_container_access(self, name, access):
|
||||
"""Set the access control list on a container.
|
||||
|
||||
:param str name:
|
||||
Name of the container.
|
||||
:param str access:
|
||||
ACL string to set on the container. Can also be ``public``
|
||||
or ``private`` which will be translated into appropriate ACL
|
||||
strings.
|
||||
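Example (a minimal sketch; the cloud and container names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    cloud.set_container_access('backups', 'public')
    assert cloud.get_container_access('backups') == 'public'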
"""
|
||||
if access not in OBJECT_CONTAINER_ACLS:
|
||||
raise exc.OpenStackCloudException(
|
||||
"Invalid container access specified: %s. Must be one of %s"
|
||||
% (access, list(OBJECT_CONTAINER_ACLS.keys())))
|
||||
header = {'x-container-read': OBJECT_CONTAINER_ACLS[access]}
|
||||
self.update_container(name, header)
|
||||
|
||||
def get_container_access(self, name):
|
||||
"""Get the control list from a container.
|
||||
|
||||
:param str name: Name of the container.
|
||||
"""
|
||||
container = self.get_container(name, skip_cache=True)
|
||||
if not container:
|
||||
raise exc.OpenStackCloudException("Container not found: %s" % name)
|
||||
acl = container.get('x-container-read', '')
|
||||
for key, value in OBJECT_CONTAINER_ACLS.items():
|
||||
# Convert to string for the comparison because swiftclient
|
||||
# returns byte values as bytes sometimes and apparently ==
|
||||
# on bytes doesn't work like you'd think
|
||||
if str(acl) == str(value):
|
||||
return key
|
||||
raise exc.OpenStackCloudException(
|
||||
"Could not determine container access for ACL: %s." % acl)
|
||||
|
||||
def _get_file_hashes(self, filename):
|
||||
file_key = "{filename}:{mtime}".format(
|
||||
filename=filename,
|
||||
mtime=os.stat(filename).st_mtime)
|
||||
if file_key not in self._file_hash_cache:
|
||||
self.log.debug(
|
||||
'Calculating hashes for %(filename)s', {'filename': filename})
|
||||
md5 = hashlib.md5()
|
||||
sha256 = hashlib.sha256()
|
||||
with open(filename, 'rb') as file_obj:
|
||||
for chunk in iter(lambda: file_obj.read(8192), b''):
|
||||
md5.update(chunk)
|
||||
sha256.update(chunk)
|
||||
self._file_hash_cache[file_key] = dict(
|
||||
md5=md5.hexdigest(), sha256=sha256.hexdigest())
|
||||
self.log.debug(
|
||||
"Image file %(filename)s md5:%(md5)s sha256:%(sha256)s",
|
||||
{'filename': filename,
|
||||
'md5': self._file_hash_cache[file_key]['md5'],
|
||||
'sha256': self._file_hash_cache[file_key]['sha256']})
|
||||
return (self._file_hash_cache[file_key]['md5'],
|
||||
self._file_hash_cache[file_key]['sha256'])
|
||||
|
||||
@_utils.cache_on_arguments()
|
||||
def get_object_capabilities(self):
|
||||
"""Get infomation about the object-storage service
|
||||
|
||||
The object-storage service publishes a set of capabilities that
|
||||
include metadata about maximum values and thresholds.
|
||||
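Example (a minimal sketch; the cloud name is illustrative and the
keys shown are only present if the cluster publishes them)::

    import openstack

    cloud = openstack.connect(cloud='example')
    caps = cloud.get_object_capabilities()
    print(caps.get('swift', {}).get('max_file_size'))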
"""
|
||||
# The endpoint in the catalog has version and project-id in it
|
||||
# To get capabilities, we have to disassemble and reassemble the URL
|
||||
# This logic is taken from swiftclient
|
||||
endpoint = urllib.parse.urlparse(self.object_store.get_endpoint())
|
||||
url = "{scheme}://{netloc}/info".format(
|
||||
scheme=endpoint.scheme, netloc=endpoint.netloc)
|
||||
|
||||
return proxy._json_response(self.object_store.get(url))
|
||||
|
||||
def get_object_segment_size(self, segment_size):
|
||||
"""Get a segment size that will work given capabilities"""
|
||||
if segment_size is None:
|
||||
segment_size = DEFAULT_OBJECT_SEGMENT_SIZE
|
||||
min_segment_size = 0
|
||||
try:
|
||||
caps = self.get_object_capabilities()
|
||||
except exc.OpenStackCloudHTTPError as e:
|
||||
if e.response.status_code in (404, 412):
|
||||
# Clear the exception so that it doesn't linger
|
||||
# and get reported as an Inner Exception later
|
||||
_utils._exc_clear()
|
||||
server_max_file_size = DEFAULT_MAX_FILE_SIZE
|
||||
self.log.info(
|
||||
"Swift capabilities not supported. "
|
||||
"Using default max file size.")
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
server_max_file_size = caps.get('swift', {}).get('max_file_size',
|
||||
0)
|
||||
min_segment_size = caps.get('slo', {}).get('min_segment_size', 0)
|
||||
|
||||
if segment_size > server_max_file_size:
|
||||
return server_max_file_size
|
||||
if segment_size < min_segment_size:
|
||||
return min_segment_size
|
||||
return segment_size
|
||||
|
||||
def is_object_stale(
|
||||
self, container, name, filename, file_md5=None, file_sha256=None):
|
||||
"""Check to see if an object matches the hashes of a file.
|
||||
|
||||
:param container: Name of the container.
|
||||
:param name: Name of the object.
|
||||
:param filename: Path to the file.
|
||||
:param file_md5:
|
||||
Pre-calculated md5 of the file contents. Defaults to None which
|
||||
means calculate locally.
|
||||
:param file_sha256:
|
||||
Pre-calculated sha256 of the file contents. Defaults to None which
|
||||
means calculate locally.
|
||||
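Example (a minimal sketch; the container, object and file names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    if cloud.is_object_stale('backups', 'db.dump', '/tmp/db.dump'):
        cloud.create_object('backups', 'db.dump', filename='/tmp/db.dump')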
"""
|
||||
metadata = self.get_object_metadata(container, name)
|
||||
if not metadata:
|
||||
self.log.debug(
|
||||
"swift stale check, no object: {container}/{name}".format(
|
||||
container=container, name=name))
|
||||
return True
|
||||
|
||||
if not (file_md5 or file_sha256):
|
||||
(file_md5, file_sha256) = self._get_file_hashes(filename)
|
||||
md5_key = metadata.get(
|
||||
self._OBJECT_MD5_KEY, metadata.get(self._SHADE_OBJECT_MD5_KEY, ''))
|
||||
sha256_key = metadata.get(
|
||||
self._OBJECT_SHA256_KEY, metadata.get(
|
||||
self._SHADE_OBJECT_SHA256_KEY, ''))
|
||||
up_to_date = self._hashes_up_to_date(
|
||||
md5=file_md5, sha256=file_sha256,
|
||||
md5_key=md5_key, sha256_key=sha256_key)
|
||||
|
||||
if not up_to_date:
|
||||
self.log.debug(
|
||||
"swift checksum mismatch: "
|
||||
" %(filename)s!=%(container)s/%(name)s",
|
||||
{'filename': filename, 'container': container, 'name': name})
|
||||
return True
|
||||
|
||||
self.log.debug(
|
||||
"swift object up to date: %(container)s/%(name)s",
|
||||
{'container': container, 'name': name})
|
||||
return False
|
||||
|
||||
def create_directory_marker_object(self, container, name, **headers):
|
||||
"""Create a zero-byte directory marker object
|
||||
|
||||
.. note::
|
||||
|
||||
This method is not needed in most cases. Modern swift does not
|
||||
require directory marker objects. However, some swift installs may
|
||||
need these.
|
||||
|
||||
When using swift Static Web and Web Listings to serve static content
|
||||
one may need to create a zero-byte object to represent each
|
||||
"directory". Doing so allows Web Listings to generate an index of the
|
||||
objects inside of it, and allows Static Web to render index.html
|
||||
"files" that are "inside" the directory.
|
||||
|
||||
:param container: The name of the container.
|
||||
:param name: Name for the directory marker object within the container.
|
||||
:param headers: These will be passed through to the object creation
|
||||
API as HTTP Headers.
|
||||
"""
|
||||
headers['content-type'] = 'application/directory'
|
||||
|
||||
return self.create_object(
|
||||
container,
|
||||
name,
|
||||
data='',
|
||||
generate_checksums=False,
|
||||
**headers)
|
||||
|
||||
def create_object(
|
||||
self, container, name, filename=None,
|
||||
md5=None, sha256=None, segment_size=None,
|
||||
use_slo=True, metadata=None,
|
||||
generate_checksums=None, data=None,
|
||||
**headers):
|
||||
"""Create a file object.
|
||||
|
||||
Automatically uses large-object segments if needed.
|
||||
|
||||
:param container: The name of the container to store the file in.
|
||||
This container will be created if it does not exist already.
|
||||
:param name: Name for the object within the container.
|
||||
:param filename: The path to the local file whose contents will be
|
||||
uploaded. Mutually exclusive with data.
|
||||
:param data: The content to upload to the object. Mutually exclusive
|
||||
with filename.
|
||||
:param md5: A hexadecimal md5 of the file. (Optional), if it is known
|
||||
and can be passed here, it will save repeating the expensive md5
|
||||
process. It is assumed to be accurate.
|
||||
:param sha256: A hexadecimal sha256 of the file. (Optional) See md5.
|
||||
:param segment_size: Break the uploaded object into segments of this
|
||||
many bytes. (Optional) Shade will attempt to discover the maximum
|
||||
value for this from the server if it is not specified, or will use
|
||||
a reasonable default.
|
||||
:param headers: These will be passed through to the object creation
|
||||
API as HTTP Headers.
|
||||
:param use_slo: If the object is large enough to need to be a Large
|
||||
Object, use a static rather than dynamic object. Static Objects
|
||||
will delete segment objects when the manifest object is deleted.
|
||||
(optional, defaults to True)
|
||||
:param generate_checksums: Whether to generate checksums on the client
|
||||
side that get added to headers for later prevention of double
|
||||
uploads of identical data. (optional, defaults to True)
|
||||
:param metadata: This dict will get changed into headers that set
|
||||
metadata of the object
|
||||
|
||||
:raises: ``OpenStackCloudException`` on operation error.
|
||||
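Example (a minimal sketch; the cloud, container, object and file
names are illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    # Large files are split into segments automatically.
    cloud.create_object(
        'backups', 'db.dump', filename='/tmp/db.dump',
        metadata={'purpose': 'nightly-backup'})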
"""
|
||||
if data is not None and filename:
|
||||
raise ValueError(
|
||||
"Both filename and data given. Please choose one.")
|
||||
if data is not None and not name:
|
||||
raise ValueError(
|
||||
"name is a required parameter when data is given")
|
||||
if data is not None and generate_checksums:
|
||||
raise ValueError(
|
||||
"checksums cannot be generated with data parameter")
|
||||
if generate_checksums is None:
|
||||
if data is not None:
|
||||
generate_checksums = False
|
||||
else:
|
||||
generate_checksums = True
|
||||
|
||||
if not metadata:
|
||||
metadata = {}
|
||||
|
||||
if not filename and data is None:
|
||||
filename = name
|
||||
|
||||
if generate_checksums and (md5 is None or sha256 is None):
|
||||
(md5, sha256) = self._get_file_hashes(filename)
|
||||
if md5:
|
||||
headers[self._OBJECT_MD5_KEY] = md5 or ''
|
||||
if sha256:
|
||||
headers[self._OBJECT_SHA256_KEY] = sha256 or ''
|
||||
for (k, v) in metadata.items():
|
||||
headers['x-object-meta-' + k] = v
|
||||
|
||||
endpoint = '{container}/{name}'.format(container=container, name=name)
|
||||
|
||||
if data is not None:
|
||||
self.log.debug(
|
||||
"swift uploading data to %(endpoint)s",
|
||||
{'endpoint': endpoint})
|
||||
|
||||
return self._upload_object_data(endpoint, data, headers)
|
||||
|
||||
# segment_size gets used as a step value in a range call, so needs
|
||||
# to be an int
|
||||
if segment_size:
|
||||
segment_size = int(segment_size)
|
||||
segment_size = self.get_object_segment_size(segment_size)
|
||||
file_size = os.path.getsize(filename)
|
||||
|
||||
if self.is_object_stale(container, name, filename, md5, sha256):
|
||||
|
||||
self.log.debug(
|
||||
"swift uploading %(filename)s to %(endpoint)s",
|
||||
{'filename': filename, 'endpoint': endpoint})
|
||||
|
||||
if file_size <= segment_size:
|
||||
self._upload_object(endpoint, filename, headers)
|
||||
else:
|
||||
self._upload_large_object(
|
||||
endpoint, filename, headers,
|
||||
file_size, segment_size, use_slo)
|
||||
|
||||
def _upload_object_data(self, endpoint, data, headers):
|
||||
return proxy._json_response(self.object_store.put(
|
||||
endpoint, headers=headers, data=data))
|
||||
|
||||
def _upload_object(self, endpoint, filename, headers):
|
||||
return proxy._json_response(self.object_store.put(
|
||||
endpoint, headers=headers, data=open(filename, 'rb')))
|
||||
|
||||
def _get_file_segments(self, endpoint, filename, file_size, segment_size):
|
||||
# Use an ordered dict here so that testing can replicate things
|
||||
segments = collections.OrderedDict()
|
||||
for (index, offset) in enumerate(range(0, file_size, segment_size)):
|
||||
remaining = file_size - (index * segment_size)
|
||||
segment = _utils.FileSegment(
|
||||
filename, offset,
|
||||
segment_size if segment_size < remaining else remaining)
|
||||
name = '{endpoint}/{index:0>6}'.format(
|
||||
endpoint=endpoint, index=index)
|
||||
segments[name] = segment
|
||||
return segments
|
||||
|
||||
def _object_name_from_url(self, url):
|
||||
'''Get container_name/object_name from the full URL called.
|
||||
|
||||
Remove the Swift endpoint from the front of the URL, and remove
|
||||
the leading / that will be left behind.'''
|
||||
endpoint = self.object_store.get_endpoint()
|
||||
object_name = url.replace(endpoint, '')
|
||||
if object_name.startswith('/'):
|
||||
object_name = object_name[1:]
|
||||
return object_name
|
||||
|
||||
def _add_etag_to_manifest(self, segment_results, manifest):
|
||||
for result in segment_results:
|
||||
if 'Etag' not in result.headers:
|
||||
continue
|
||||
name = self._object_name_from_url(result.url)
|
||||
for entry in manifest:
|
||||
if entry['path'] == '/{name}'.format(name=name):
|
||||
entry['etag'] = result.headers['Etag']
|
||||
|
||||
def _upload_large_object(
|
||||
self, endpoint, filename,
|
||||
headers, file_size, segment_size, use_slo):
|
||||
# If the object is big, we need to break it up into segments that
|
||||
# are no larger than segment_size, upload each of them individually
|
||||
# and then upload a manifest object. The segments can be uploaded in
|
||||
# parallel, so we'll use the async feature of the TaskManager.
|
||||
|
||||
segment_futures = []
|
||||
segment_results = []
|
||||
retry_results = []
|
||||
retry_futures = []
|
||||
manifest = []
|
||||
|
||||
# Get an OrderedDict with keys being the swift location for the
|
||||
# segment, the value a FileSegment file-like object that is a
|
||||
# slice of the data for the segment.
|
||||
segments = self._get_file_segments(
|
||||
endpoint, filename, file_size, segment_size)
|
||||
|
||||
# Schedule the segments for upload
|
||||
for name, segment in segments.items():
|
||||
# Async call to put - schedules execution and returns a future
|
||||
segment_future = self._pool_executor.submit(
|
||||
self.object_store.put,
|
||||
name, headers=headers, data=segment,
|
||||
raise_exc=False)
|
||||
segment_futures.append(segment_future)
|
||||
# TODO(mordred) Collect etags from results to add to this manifest
|
||||
# dict. Then sort the list of dicts by path.
|
||||
manifest.append(dict(
|
||||
path='/{name}'.format(name=name),
|
||||
size_bytes=segment.length))
|
||||
|
||||
# Try once and collect failed results to retry
|
||||
segment_results, retry_results = self._wait_for_futures(
|
||||
segment_futures, raise_on_error=False)
|
||||
|
||||
self._add_etag_to_manifest(segment_results, manifest)
|
||||
|
||||
for result in retry_results:
|
||||
# Grab the FileSegment for the failed upload so we can retry
|
||||
name = self._object_name_from_url(result.url)
|
||||
segment = segments[name]
|
||||
segment.seek(0)
|
||||
# Async call to put - schedules execution and returns a future
|
||||
segment_future = self._pool_executor.submit(
|
||||
self.object_store.put,
|
||||
name, headers=headers, data=segment)
|
||||
# TODO(mordred) Collect etags from results to add to this manifest
|
||||
# dict. Then sort the list of dicts by path.
|
||||
retry_futures.append(segment_future)
|
||||
|
||||
# If any segments fail the second time, just throw the error
|
||||
segment_results, retry_results = self._wait_for_futures(
|
||||
retry_futures, raise_on_error=True)
|
||||
|
||||
self._add_etag_to_manifest(segment_results, manifest)
|
||||
|
||||
if use_slo:
|
||||
return self._finish_large_object_slo(endpoint, headers, manifest)
|
||||
else:
|
||||
return self._finish_large_object_dlo(endpoint, headers)
|
||||
|
||||
def _finish_large_object_slo(self, endpoint, headers, manifest):
|
||||
# TODO(mordred) send an etag of the manifest, which is the md5sum
|
||||
# of the concatenation of the etags of the results
|
||||
headers = headers.copy()
|
||||
return self._object_store_client.put(
|
||||
endpoint,
|
||||
params={'multipart-manifest': 'put'},
|
||||
headers=headers, data=json.dumps(manifest))
|
||||
|
||||
def _finish_large_object_dlo(self, endpoint, headers):
|
||||
headers = headers.copy()
|
||||
headers['X-Object-Manifest'] = endpoint
|
||||
return self._object_store_client.put(endpoint, headers=headers)
|
||||
|
||||
def update_object(self, container, name, metadata=None, **headers):
|
||||
"""Update the metadata of an object
|
||||
|
||||
:param container: The name of the container the object is in
|
||||
:param name: Name for the object within the container.
|
||||
:param metadata: This dict will get changed into headers that set
|
||||
metadata of the object
|
||||
:param headers: These will be passed through to the object update
|
||||
API as HTTP Headers.
|
||||
|
||||
:raises: ``OpenStackCloudException`` on operation error.
|
||||
"""
|
||||
if not metadata:
|
||||
metadata = {}
|
||||
|
||||
metadata_headers = {}
|
||||
|
||||
for (k, v) in metadata.items():
|
||||
metadata_headers['x-object-meta-' + k] = v
|
||||
|
||||
headers = dict(headers, **metadata_headers)
|
||||
|
||||
return self._object_store_client.post(
|
||||
'{container}/{object}'.format(
|
||||
container=container, object=name),
|
||||
headers=headers)
|
||||
|
||||
def list_objects(self, container, full_listing=True, prefix=None):
|
||||
"""List objects.
|
||||
|
||||
:param container: Name of the container to list objects in.
|
||||
:param full_listing: Ignored. Present for backwards compat
|
||||
:param string prefix:
|
||||
only objects with this prefix will be returned.
|
||||
(optional)
|
||||
|
||||
:returns: list of Munch of the objects
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
params = dict(format='json', prefix=prefix)
|
||||
data = self._object_store_client.get(container, params=params)
|
||||
return self._get_and_munchify(None, data)
|
||||
|
||||
def search_objects(self, container, name=None, filters=None):
|
||||
"""Search objects.
|
||||
|
||||
:param string name: object name.
|
||||
:param filters: a dict containing additional filters to use.
|
||||
OR
|
||||
A string containing a jmespath expression for further filtering.
|
||||
Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
|
||||
|
||||
:returns: a list of ``munch.Munch`` containing the objects.
|
||||
|
||||
:raises: ``OpenStackCloudException``: if something goes wrong during
|
||||
the OpenStack API call.
|
||||
"""
|
||||
objects = self.list_objects(container)
|
||||
return _utils._filter_list(objects, name, filters)
|
||||
|
||||
def delete_object(self, container, name, meta=None):
|
||||
"""Delete an object from a container.
|
||||
|
||||
:param string container: Name of the container holding the object.
|
||||
:param string name: Name of the object to delete.
|
||||
:param dict meta: Metadata for the object in question. (optional, will
|
||||
be fetched if not provided)
|
||||
|
||||
:returns: True if delete succeeded, False if the object was not found.
|
||||
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
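Example (a minimal sketch; the cloud, container and object names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    if not cloud.delete_object('backups', 'db.dump'):
        print('object was already gone')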
"""
|
||||
# TODO(mordred) DELETE for swift returns status in text/plain format
|
||||
# like so:
|
||||
# Number Deleted: 15
|
||||
# Number Not Found: 0
|
||||
# Response Body:
|
||||
# Response Status: 200 OK
|
||||
# Errors:
|
||||
# We should ultimately do something with that
|
||||
try:
|
||||
if not meta:
|
||||
meta = self.get_object_metadata(container, name)
|
||||
if not meta:
|
||||
return False
|
||||
params = {}
|
||||
if meta.get('X-Static-Large-Object', None) == 'True':
|
||||
params['multipart-manifest'] = 'delete'
|
||||
self._object_store_client.delete(
|
||||
'{container}/{object}'.format(
|
||||
container=container, object=name),
|
||||
params=params)
|
||||
return True
|
||||
except exc.OpenStackCloudHTTPError:
|
||||
return False
|
||||
|
||||
def delete_autocreated_image_objects(self, container=None):
|
||||
"""Delete all objects autocreated for image uploads.
|
||||
|
||||
This method should generally not be needed, as shade should clean up
|
||||
the objects it uses for object-based image creation. If something
|
||||
goes wrong and it is found that there are leaked objects, this method
|
||||
can be used to delete any objects that shade has created on the user's
|
||||
behalf in service of image uploads.
|
||||
"""
|
||||
if container is None:
|
||||
container = self._OBJECT_AUTOCREATE_CONTAINER
|
||||
# This method only makes sense on clouds that use tasks
|
||||
if not self.image_api_use_tasks:
|
||||
return False
|
||||
|
||||
deleted = False
|
||||
for obj in self.list_objects(container):
|
||||
meta = self.get_object_metadata(container, obj['name'])
|
||||
if meta.get(
|
||||
self._OBJECT_AUTOCREATE_KEY, meta.get(
|
||||
self._SHADE_OBJECT_AUTOCREATE_KEY)) == 'true':
|
||||
if self.delete_object(container, obj['name'], meta):
|
||||
deleted = True
|
||||
return deleted
|
||||
|
||||
def get_object_metadata(self, container, name):
|
||||
try:
|
||||
return self._object_store_client.head(
|
||||
'{container}/{object}'.format(
|
||||
container=container, object=name)).headers
|
||||
except exc.OpenStackCloudException as e:
|
||||
if e.response.status_code == 404:
|
||||
return None
|
||||
raise
|
||||
|
||||
def get_object_raw(self, container, obj, query_string=None, stream=False):
|
||||
"""Get a raw response object for an object.
|
||||
|
||||
:param string container: name of the container.
|
||||
:param string obj: name of the object.
|
||||
:param string query_string:
|
||||
query args for uri. (delimiter, prefix, etc.)
|
||||
:param bool stream:
|
||||
Whether to stream the response or not.
|
||||
|
||||
:returns: A `requests.Response`
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
"""
|
||||
endpoint = self._get_object_endpoint(container, obj, query_string)
|
||||
return self._object_store_client.get(endpoint, stream=stream)
|
||||
|
||||
def _get_object_endpoint(self, container, obj, query_string):
|
||||
endpoint = '{container}/{object}'.format(
|
||||
container=container, object=obj)
|
||||
if query_string:
|
||||
endpoint = '{endpoint}?{query_string}'.format(
|
||||
endpoint=endpoint, query_string=query_string)
|
||||
return endpoint
|
||||
|
||||
def stream_object(
|
||||
self, container, obj, query_string=None, resp_chunk_size=1024):
|
||||
"""Download the content via a streaming iterator.
|
||||
|
||||
:param string container: name of the container.
|
||||
:param string obj: name of the object.
|
||||
:param string query_string:
|
||||
query args for uri. (delimiter, prefix, etc.)
|
||||
:param int resp_chunk_size:
|
||||
chunk size of data to read. (optional, defaults to 1k)
|
||||
|
||||
:returns:
|
||||
An iterator over the content or None if the object is not found.
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
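Example (a minimal sketch; the cloud, container, object and file
names are illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    with open('/tmp/db.dump', 'wb') as out:
        for chunk in cloud.stream_object(
                'backups', 'db.dump', resp_chunk_size=65536):
            out.write(chunk)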
"""
|
||||
try:
|
||||
with self.get_object_raw(
|
||||
container, obj, query_string=query_string) as response:
|
||||
for ret in response.iter_content(chunk_size=resp_chunk_size):
|
||||
yield ret
|
||||
except exc.OpenStackCloudHTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
return
|
||||
raise
|
||||
|
||||
def get_object(self, container, obj, query_string=None,
|
||||
resp_chunk_size=1024, outfile=None, stream=False):
|
||||
"""Get the headers and body of an object
|
||||
|
||||
:param string container: name of the container.
|
||||
:param string obj: name of the object.
|
||||
:param string query_string:
|
||||
query args for uri. (delimiter, prefix, etc.)
|
||||
:param int resp_chunk_size:
|
||||
chunk size of data to read. Only used if the results are
|
||||
being written to a file or stream is True.
|
||||
(optional, defaults to 1k)
|
||||
:param outfile:
|
||||
Write the object to a file instead of returning the contents.
|
||||
If this option is given, body in the return tuple will be None.
|
||||
outfile can either be a file path given as a string, or a
|
||||
File like object.
|
||||
|
||||
:returns: Tuple (headers, body) of the object, or None if the object
|
||||
is not found (404).
|
||||
:raises: OpenStackCloudException on operation error.
|
||||
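Example (a minimal sketch; the cloud, container and object names are
illustrative only)::

    import openstack

    cloud = openstack.connect(cloud='example')
    result = cloud.get_object('backups', 'db.dump')
    if result:
        headers, body = result
        print(headers.get('content-length'))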
"""
|
||||
# TODO(mordred) implement resp_chunk_size
|
||||
endpoint = self._get_object_endpoint(container, obj, query_string)
|
||||
try:
|
||||
get_stream = (outfile is not None)
|
||||
with self._object_store_client.get(
|
||||
endpoint, stream=get_stream) as response:
|
||||
response_headers = {
|
||||
k.lower(): v for k, v in response.headers.items()}
|
||||
if outfile:
|
||||
if isinstance(outfile, six.string_types):
|
||||
outfile_handle = open(outfile, 'wb')
|
||||
else:
|
||||
outfile_handle = outfile
|
||||
for chunk in response.iter_content(
|
||||
resp_chunk_size, decode_unicode=False):
|
||||
outfile_handle.write(chunk)
|
||||
if isinstance(outfile, six.string_types):
|
||||
outfile_handle.close()
|
||||
else:
|
||||
outfile_handle.flush()
|
||||
return (response_headers, None)
|
||||
else:
|
||||
return (response_headers, response.text)
|
||||
except exc.OpenStackCloudHTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
return None
|
||||
raise
|
||||
|
||||
def _wait_for_futures(self, futures, raise_on_error=True):
|
||||
'''Collect results or failures from a list of running future tasks.'''
|
||||
|
||||
results = []
|
||||
retries = []
|
||||
|
||||
# Check on each result as its thread finishes
|
||||
for completed in concurrent.futures.as_completed(futures):
|
||||
try:
|
||||
result = completed.result()
|
||||
exceptions.raise_from_response(result)
|
||||
results.append(result)
|
||||
except (keystoneauth1.exceptions.RetriableConnectionFailure,
|
||||
exceptions.HttpException) as e:
|
||||
error_text = "Exception processing async task: {}".format(
|
||||
str(e))
|
||||
if raise_on_error:
|
||||
self.log.exception(error_text)
|
||||
raise
|
||||
else:
|
||||
self.log.debug(error_text)
|
||||
# If we get an exception, put the result into a list so we
|
||||
# can try again
|
||||
retries.append(completed.result())
|
||||
return results, retries
|
||||
|
    def _hashes_up_to_date(self, md5, sha256, md5_key, sha256_key):
        '''Compare md5 and sha256 hashes for being up to date

        md5 and sha256 are the current values.
        md5_key and sha256_key are the previous values.
        '''
        up_to_date = False
        if md5 and md5_key == md5:
            up_to_date = True
        if sha256 and sha256_key == sha256:
            up_to_date = True
        if md5 and md5_key != md5:
            up_to_date = False
        if sha256 and sha256_key != sha256:
            up_to_date = False
        return up_to_date
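A hedged sketch of how the comparison above can be driven: hash a local file, then compare against checksums previously stored alongside the remote object (the file path, the stored values and the `cloud` object are placeholders, not part of this change):

import hashlib

def file_hashes(path, chunk_size=8192):
    # One pass over the file, computing both digests.
    md5 = hashlib.md5()
    sha256 = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
            sha256.update(chunk)
    return md5.hexdigest(), sha256.hexdigest()

md5, sha256 = file_hashes('/tmp/image.qcow2')    # path is illustrative
stored_md5 = stored_sha256 = None                # placeholders for saved metadata
needs_upload = not cloud._hashes_up_to_date(     # `cloud` is an assumed mixin instance
    md5, sha256, md5_key=stored_md5, sha256_key=stored_sha256)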
@ -0,0 +1,275 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import types  # noqa

from openstack.cloud import exc
from openstack.cloud._heat import event_utils
from openstack.cloud._heat import template_utils
from openstack.cloud import _normalize
from openstack.cloud import _utils


def _no_pending_stacks(stacks):
    """If there are any stacks not in a steady state, don't cache"""
    for stack in stacks:
        status = stack['stack_status']
        if '_COMPLETE' not in status and '_FAILED' not in status:
            return False
    return True

class OrchestrationCloudMixin(_normalize.Normalizer):

    @property
    def _orchestration_client(self):
        if 'orchestration' not in self._raw_clients:
            raw_client = self._get_raw_client('orchestration')
            self._raw_clients['orchestration'] = raw_client
        return self._raw_clients['orchestration']

    def get_template_contents(
            self, template_file=None, template_url=None,
            template_object=None, files=None):
        try:
            return template_utils.get_template_contents(
                template_file=template_file, template_url=template_url,
                template_object=template_object, files=files)
        except Exception as e:
            raise exc.OpenStackCloudException(
                "Error in processing template files: %s" % str(e))

    def create_stack(
            self, name, tags=None,
            template_file=None, template_url=None,
            template_object=None, files=None,
            rollback=True,
            wait=False, timeout=3600,
            environment_files=None,
            **parameters):
        """Create a stack.

        :param string name: Name of the stack.
        :param tags: List of tag(s) of the stack. (optional)
        :param string template_file: Path to the template.
        :param string template_url: URL of template.
        :param string template_object: URL to retrieve template object.
        :param dict files: dict of additional file content to include.
        :param boolean rollback: Enable rollback on create failure.
        :param boolean wait: Whether to wait for the create to finish.
        :param int timeout: Stack create timeout in seconds.
        :param environment_files: Paths to environment files to apply.

        Other arguments will be passed as stack parameters which will take
        precedence over any parameters specified in the environments.

        Only one of template_file, template_url, template_object should be
        specified.

        :returns: a dict containing the stack description

        :raises: ``OpenStackCloudException`` if something goes wrong during
            the OpenStack API call
        """
        envfiles, env = template_utils.process_multiple_environments_and_files(
            env_paths=environment_files)
        tpl_files, template = template_utils.get_template_contents(
            template_file=template_file,
            template_url=template_url,
            template_object=template_object,
            files=files)
        params = dict(
            stack_name=name,
            tags=tags,
            disable_rollback=not rollback,
            parameters=parameters,
            template=template,
            files=dict(list(tpl_files.items()) + list(envfiles.items())),
            environment=env,
            timeout_mins=timeout // 60,
        )
        self._orchestration_client.post('/stacks', json=params)
        if wait:
            event_utils.poll_for_events(self, stack_name=name,
                                        action='CREATE')
        return self.get_stack(name)
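A minimal caller-side sketch of create_stack (the cloud name, template path and extra parameters are illustrative assumptions):

import openstack

conn = openstack.connect(cloud='example')            # hypothetical clouds.yaml entry
stack = conn.create_stack(
    'web-stack',                                      # stack name (illustrative)
    template_file='templates/web.yaml',               # assumed local HOT template
    rollback=True,
    wait=True,                                        # block until CREATE completes
    timeout=1800,
    flavor='m1.small',                                # extra kwargs become stack parameters
)
print(stack['stack_status'])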
    def update_stack(
            self, name_or_id,
            template_file=None, template_url=None,
            template_object=None, files=None,
            rollback=True,
            wait=False, timeout=3600,
            environment_files=None,
            **parameters):
        """Update a stack.

        :param string name_or_id: Name or ID of the stack to update.
        :param string template_file: Path to the template.
        :param string template_url: URL of template.
        :param string template_object: URL to retrieve template object.
        :param dict files: dict of additional file content to include.
        :param boolean rollback: Enable rollback on update failure.
        :param boolean wait: Whether to wait for the update to finish.
        :param int timeout: Stack update timeout in seconds.
        :param environment_files: Paths to environment files to apply.

        Other arguments will be passed as stack parameters which will take
        precedence over any parameters specified in the environments.

        Only one of template_file, template_url, template_object should be
        specified.

        :returns: a dict containing the stack description

        :raises: ``OpenStackCloudException`` if something goes wrong during
            the OpenStack API calls
        """
        envfiles, env = template_utils.process_multiple_environments_and_files(
            env_paths=environment_files)
        tpl_files, template = template_utils.get_template_contents(
            template_file=template_file,
            template_url=template_url,
            template_object=template_object,
            files=files)
        params = dict(
            disable_rollback=not rollback,
            parameters=parameters,
            template=template,
            files=dict(list(tpl_files.items()) + list(envfiles.items())),
            environment=env,
            timeout_mins=timeout // 60,
        )
        if wait:
            # find the last event to use as the marker
            events = event_utils.get_events(
                self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1})
            marker = events[0].id if events else None

        self._orchestration_client.put(
            '/stacks/{name_or_id}'.format(name_or_id=name_or_id), json=params)
        if wait:
            event_utils.poll_for_events(self,
                                        name_or_id,
                                        action='UPDATE',
                                        marker=marker)
        return self.get_stack(name_or_id)

    def delete_stack(self, name_or_id, wait=False):
        """Delete a stack

        :param string name_or_id: Stack name or ID.
        :param boolean wait: Whether to wait for the delete to finish.

        :returns: True if delete succeeded, False if the stack was not found.

        :raises: ``OpenStackCloudException`` if something goes wrong during
            the OpenStack API call
        """
        stack = self.get_stack(name_or_id, resolve_outputs=False)
        if stack is None:
            self.log.debug("Stack %s not found for deleting", name_or_id)
            return False

        if wait:
            # find the last event to use as the marker
            events = event_utils.get_events(
                self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1})
            marker = events[0].id if events else None

        self._orchestration_client.delete(
            '/stacks/{id}'.format(id=stack['id']))

        if wait:
            try:
                event_utils.poll_for_events(self,
                                            stack_name=name_or_id,
                                            action='DELETE',
                                            marker=marker)
            except exc.OpenStackCloudHTTPError:
                pass
            stack = self.get_stack(name_or_id, resolve_outputs=False)
            if stack and stack['stack_status'] == 'DELETE_FAILED':
                raise exc.OpenStackCloudException(
                    "Failed to delete stack {id}: {reason}".format(
                        id=name_or_id, reason=stack['stack_status_reason']))

        return True

    def search_stacks(self, name_or_id=None, filters=None):
        """Search stacks.

        :param name_or_id: Name or ID of the desired stack.
        :param filters: a dict containing additional filters to use. e.g.
            {'stack_status': 'CREATE_COMPLETE'}

        :returns: a list of ``munch.Munch`` containing the stack description.

        :raises: ``OpenStackCloudException`` if something goes wrong during the
            OpenStack API call.
        """
        stacks = self.list_stacks()
        return _utils._filter_list(stacks, name_or_id, filters)

    @_utils.cache_on_arguments(should_cache_fn=_no_pending_stacks)
    def list_stacks(self):
        """List all stacks.

        :returns: a list of ``munch.Munch`` containing the stack description.

        :raises: ``OpenStackCloudException`` if something goes wrong during the
            OpenStack API call.
        """
        data = self._orchestration_client.get(
            '/stacks', error_message="Error fetching stack list")
        return self._normalize_stacks(
            self._get_and_munchify('stacks', data))

    def get_stack(self, name_or_id, filters=None, resolve_outputs=True):
        """Get exactly one stack.

        :param name_or_id: Name or ID of the desired stack.
        :param filters: a dict containing additional filters to use. e.g.
            {'stack_status': 'CREATE_COMPLETE'}
        :param resolve_outputs: If True, then outputs for this
            stack will be resolved

        :returns: a ``munch.Munch`` containing the stack description

        :raises: ``OpenStackCloudException`` if something goes wrong during the
            OpenStack API call or if multiple matches are found.
        """

        def _search_one_stack(name_or_id=None, filters=None):
            # stack names are mandatory and enforced unique in the project
            # so a StackGet can always be used for name or ID.
            try:
                url = '/stacks/{name_or_id}'.format(name_or_id=name_or_id)
                if not resolve_outputs:
                    url = '{url}?resolve_outputs=False'.format(url=url)
                data = self._orchestration_client.get(
                    url,
                    error_message="Error fetching stack")
                stack = self._get_and_munchify('stack', data)
                # Treat DELETE_COMPLETE stacks as a NotFound
                if stack['stack_status'] == 'DELETE_COMPLETE':
                    return []
            except exc.OpenStackCloudURINotFound:
                return []
            stack = self._normalize_stack(stack)
            return _utils._filter_list([stack], name_or_id, filters)

        return _utils._get_entity(
            self, _search_one_stack, name_or_id, filters)
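A short sketch tying the stack calls above together (the cloud name and stack name are illustrative assumptions):

import openstack

conn = openstack.connect(cloud='example')        # hypothetical clouds.yaml entry

for stack in conn.search_stacks(filters={'stack_status': 'CREATE_COMPLETE'}):
    print(stack['stack_name'], stack['stack_status'])

stack = conn.get_stack('web-stack', resolve_outputs=False)   # name is an example
if stack:
    conn.delete_stack('web-stack', wait=True)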
@ -0,0 +1,387 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
# import jsonpatch
import types  # noqa

from openstack.cloud import exc
from openstack.cloud import _normalize
from openstack.cloud import _utils
from openstack import exceptions
from openstack import proxy

class SecurityGroupCloudMixin(_normalize.Normalizer):

    def __init__(self):
        self.secgroup_source = self.config.config['secgroup_source']

    def get_security_group(self, name_or_id, filters=None):
        """Get a security group by name or ID.

        :param name_or_id: Name or ID of the security group.
        :param filters:
            A dictionary of meta data to use for further filtering. Elements
            of this dictionary may, themselves, be dictionaries. Example::

                {
                  'last_name': 'Smith',
                  'other': {
                      'gender': 'Female'
                  }
                }

            OR
            A string containing a jmespath expression for further filtering.
            Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"

        :returns: A security group ``munch.Munch`` or None if no matching
            security group is found.

        """
        return _utils._get_entity(
            self, 'security_group', name_or_id, filters)

    def get_security_group_by_id(self, id):
        """Get a security group by ID

        :param id: ID of the security group.
        :returns: A security group ``munch.Munch``.
        """
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )
        error_message = ("Error getting security group with"
                         " ID {id}".format(id=id))
        if self._use_neutron_secgroups():
            resp = self.network.get('/security-groups/{id}'.format(id=id))
            data = proxy._json_response(resp, error_message=error_message)
        else:
            data = proxy._json_response(
                self.compute.get(
                    '/os-security-groups/{id}'.format(id=id)),
                error_message=error_message)
        return self._normalize_secgroup(
            self._get_and_munchify('security_group', data))

    def create_security_group(self, name, description, project_id=None):
        """Create a new security group

        :param string name: A name for the security group.
        :param string description: Describes the security group.
        :param string project_id:
            Specify the project ID this security group will be created
            on (admin-only).

        :returns: A ``munch.Munch`` representing the new security group.

        :raises: OpenStackCloudException on operation error.
        :raises: OpenStackCloudUnavailableFeature if security groups are
                 not supported on this cloud.
        """

        # Security groups not supported
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )

        data = []
        security_group_json = {
            'security_group': {
                'name': name, 'description': description
            }}
        if project_id is not None:
            security_group_json['security_group']['tenant_id'] = project_id
        if self._use_neutron_secgroups():
            data = proxy._json_response(
                self.network.post(
                    '/security-groups.json',
                    json=security_group_json),
                error_message="Error creating security group {0}".format(name))
        else:
            data = proxy._json_response(self.compute.post(
                '/os-security-groups', json=security_group_json))
        return self._normalize_secgroup(
            self._get_and_munchify('security_group', data))

    def delete_security_group(self, name_or_id):
        """Delete a security group

        :param string name_or_id: The name or unique ID of the security group.

        :returns: True if delete succeeded, False otherwise.

        :raises: OpenStackCloudException on operation error.
        :raises: OpenStackCloudUnavailableFeature if security groups are
                 not supported on this cloud.
        """
        # Security groups not supported
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )

        # TODO(mordred): Let's come back and stop doing a GET before we do
        #                the delete.
        secgroup = self.get_security_group(name_or_id)
        if secgroup is None:
            self.log.debug('Security group %s not found for deleting',
                           name_or_id)
            return False

        if self._use_neutron_secgroups():
            exceptions.raise_from_response(
                self.network.delete(
                    '/security-groups/{sg_id}.json'.format(
                        sg_id=secgroup['id'])),
                error_message="Error deleting security group {0}".format(
                    name_or_id)
            )
            return True

        else:
            proxy._json_response(self.compute.delete(
                '/os-security-groups/{id}'.format(id=secgroup['id'])))
            return True

    @_utils.valid_kwargs('name', 'description')
    def update_security_group(self, name_or_id, **kwargs):
        """Update a security group

        :param string name_or_id: Name or ID of the security group to update.
        :param string name: New name for the security group.
        :param string description: New description for the security group.

        :returns: A ``munch.Munch`` describing the updated security group.

        :raises: OpenStackCloudException on operation error.
        """
        # Security groups not supported
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )

        group = self.get_security_group(name_or_id)

        if group is None:
            raise exc.OpenStackCloudException(
                "Security group %s not found." % name_or_id)

        if self._use_neutron_secgroups():
            data = proxy._json_response(
                self.network.put(
                    '/security-groups/{sg_id}.json'.format(sg_id=group['id']),
                    json={'security_group': kwargs}),
                error_message="Error updating security group {0}".format(
                    name_or_id))
        else:
            for key in ('name', 'description'):
                kwargs.setdefault(key, group[key])
            data = proxy._json_response(
                self.compute.put(
                    '/os-security-groups/{id}'.format(id=group['id']),
                    json={'security_group': kwargs}))
        return self._normalize_secgroup(
            self._get_and_munchify('security_group', data))

    def create_security_group_rule(self,
                                   secgroup_name_or_id,
                                   port_range_min=None,
                                   port_range_max=None,
                                   protocol=None,
                                   remote_ip_prefix=None,
                                   remote_group_id=None,
                                   direction='ingress',
                                   ethertype='IPv4',
                                   project_id=None):
        """Create a new security group rule

        :param string secgroup_name_or_id:
            The security group name or ID to associate with this security
            group rule. If a non-unique group name is given, an exception
            is raised.
        :param int port_range_min:
            The minimum port number in the range that is matched by the
            security group rule. If the protocol is TCP or UDP, this value
            must be less than or equal to the port_range_max attribute value.
            If nova is used by the cloud provider for security groups, then
            a value of None will be transformed to -1.
        :param int port_range_max:
            The maximum port number in the range that is matched by the
            security group rule. The port_range_min attribute constrains the
            port_range_max attribute. If nova is used by the cloud provider
            for security groups, then a value of None will be transformed
            to -1.
        :param string protocol:
            The protocol that is matched by the security group rule. Valid
            values are None, tcp, udp, and icmp.
        :param string remote_ip_prefix:
            The remote IP prefix to be associated with this security group
            rule. This attribute matches the specified IP prefix as the
            source IP address of the IP packet.
        :param string remote_group_id:
            The remote group ID to be associated with this security group
            rule.
        :param string direction:
            Ingress or egress: The direction in which the security group
            rule is applied. For a compute instance, an ingress security
            group rule is applied to incoming (ingress) traffic for that
            instance. An egress rule is applied to traffic leaving the
            instance.
        :param string ethertype:
            Must be IPv4 or IPv6, and addresses represented in CIDR must
            match the ingress or egress rules.
        :param string project_id:
            Specify the project ID this security group rule will be created
            on (admin-only).

        :returns: A ``munch.Munch`` representing the new security group rule.

        :raises: OpenStackCloudException on operation error.
        """
        # Security groups not supported
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )

        secgroup = self.get_security_group(secgroup_name_or_id)
        if not secgroup:
            raise exc.OpenStackCloudException(
                "Security group %s not found." % secgroup_name_or_id)

        if self._use_neutron_secgroups():
            # NOTE: Nova accepts -1 port numbers, but Neutron accepts None
            # as the equivalent value.
            rule_def = {
                'security_group_id': secgroup['id'],
                'port_range_min':
                    None if port_range_min == -1 else port_range_min,
                'port_range_max':
                    None if port_range_max == -1 else port_range_max,
                'protocol': protocol,
                'remote_ip_prefix': remote_ip_prefix,
                'remote_group_id': remote_group_id,
                'direction': direction,
                'ethertype': ethertype
            }
            if project_id is not None:
                rule_def['tenant_id'] = project_id

            data = proxy._json_response(
                self.network.post(
                    '/security-group-rules.json',
                    json={'security_group_rule': rule_def}),
                error_message="Error creating security group rule")
        else:
            # NOTE: Neutron accepts None for protocol. Nova does not.
            if protocol is None:
                raise exc.OpenStackCloudException('Protocol must be specified')

            if direction == 'egress':
                self.log.debug(
                    'Rule creation failed: Nova does not support egress rules'
                )
                raise exc.OpenStackCloudException(
                    'No support for egress rules')

            # NOTE: Neutron accepts None for ports, but Nova requires -1
            # as the equivalent value for ICMP.
            #
            # For TCP/UDP, if both are None, Neutron allows this and Nova
            # represents this as all ports (1-65535). Nova does not accept
            # None values, so to hide this difference, we will automatically
            # convert to the full port range. If only a single port value is
            # specified, it will error as normal.
            if protocol == 'icmp':
                if port_range_min is None:
                    port_range_min = -1
                if port_range_max is None:
                    port_range_max = -1
            elif protocol in ['tcp', 'udp']:
                if port_range_min is None and port_range_max is None:
                    port_range_min = 1
                    port_range_max = 65535

            security_group_rule_dict = dict(security_group_rule=dict(
                parent_group_id=secgroup['id'],
                ip_protocol=protocol,
                from_port=port_range_min,
                to_port=port_range_max,
                cidr=remote_ip_prefix,
                group_id=remote_group_id
            ))
            if project_id is not None:
                security_group_rule_dict[
                    'security_group_rule']['tenant_id'] = project_id
            data = proxy._json_response(
                self.compute.post(
                    '/os-security-group-rules',
                    json=security_group_rule_dict
                ))
        return self._normalize_secgroup_rule(
            self._get_and_munchify('security_group_rule', data))

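A short end-to-end sketch of the two creation calls above (the cloud entry, group name and CIDR are illustrative assumptions; opening a port to 0.0.0.0/0 is only for the example):

import openstack

conn = openstack.connect(cloud='example')        # hypothetical clouds.yaml entry

group = conn.create_security_group(
    'web', 'allow inbound web traffic')           # name/description are examples
conn.create_security_group_rule(
    group['id'],
    port_range_min=443, port_range_max=443,
    protocol='tcp',
    remote_ip_prefix='0.0.0.0/0',                 # wide open; tighten in practice
    direction='ingress',
    ethertype='IPv4')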
    def delete_security_group_rule(self, rule_id):
        """Delete a security group rule

        :param string rule_id: The unique ID of the security group rule.

        :returns: True if delete succeeded, False otherwise.

        :raises: OpenStackCloudException on operation error.
        :raises: OpenStackCloudUnavailableFeature if security groups are
                 not supported on this cloud.
        """
        # Security groups not supported
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )

        if self._use_neutron_secgroups():
            try:
                exceptions.raise_from_response(
                    self.network.delete(
                        '/security-group-rules/{sg_id}.json'.format(
                            sg_id=rule_id)),
                    error_message="Error deleting security group rule "
                                  "{0}".format(rule_id))
            except exc.OpenStackCloudResourceNotFound:
                return False
            return True

        else:
            try:
                exceptions.raise_from_response(
                    self.compute.delete(
                        '/os-security-group-rules/{id}'.format(id=rule_id)))
            except exc.OpenStackCloudResourceNotFound:
                return False

            return True

    def _has_secgroups(self):
        if not self.secgroup_source:
            return False
        else:
            return self.secgroup_source.lower() in ('nova', 'neutron')

    def _use_neutron_secgroups(self):
        return (self.has_service('network')
                and self.secgroup_source == 'neutron')
File diff suppressed because it is too large
@ -164,6 +164,20 @@ import six
 from openstack import _log
 from openstack._meta import connection as _meta
 from openstack.cloud import openstackcloud as _cloud
+from openstack.cloud import _baremetal
+from openstack.cloud import _block_storage
+from openstack.cloud import _compute
+from openstack.cloud import _clustering
+from openstack.cloud import _coe
+from openstack.cloud import _dns
+from openstack.cloud import _floating_ip
+from openstack.cloud import _identity
+from openstack.cloud import _image
+from openstack.cloud import _network
+from openstack.cloud import _network_common
+from openstack.cloud import _object_store
+from openstack.cloud import _orchestration
+from openstack.cloud import _security_group
 from openstack import config as _config
 from openstack.config import cloud_region
 from openstack import exceptions
@ -211,7 +225,22 @@ def from_config(cloud=None, config=None, options=None, **kwargs):


 class Connection(six.with_metaclass(_meta.ConnectionMeta,
-                                    _cloud._OpenStackCloudMixin)):
+                                    _cloud._OpenStackCloudMixin,
+                                    _baremetal.BaremetalCloudMixin,
+                                    _block_storage.BlockStorageCloudMixin,
+                                    _compute.ComputeCloudMixin,
+                                    _clustering.ClusteringCloudMixin,
+                                    _coe.CoeCloudMixin,
+                                    _dns.DnsCloudMixin,
+                                    _floating_ip.FloatingIPCloudMixin,
+                                    _identity.IdentityCloudMixin,
+                                    _image.ImageCloudMixin,
+                                    _network.NetworkCloudMixin,
+                                    _network_common.NetworkCommonCloudMixin,
+                                    _object_store.ObjectStoreCloudMixin,
+                                    _orchestration.OrchestrationCloudMixin,
+                                    _security_group.SecurityGroupCloudMixin
+                                    )):

     def __init__(self, cloud=None, config=None, session=None,
                  app_name=None, app_version=None,
@ -298,9 +327,23 @@ class Connection(six.with_metaclass(_meta.ConnectionMeta,
         self._proxies = {}
         self.use_direct_get = use_direct_get
         self.strict_mode = strict
-        # Call the _OpenStackCloudMixin constructor while we work on
+        # Call the _*CloudMixin constructors while we work on
         # integrating things better.
         _cloud._OpenStackCloudMixin.__init__(self)
+        _baremetal.BaremetalCloudMixin.__init__(self)
+        _block_storage.BlockStorageCloudMixin.__init__(self)
+        _clustering.ClusteringCloudMixin.__init__(self)
+        _coe.CoeCloudMixin.__init__(self)
+        _compute.ComputeCloudMixin.__init__(self)
+        _dns.DnsCloudMixin.__init__(self)
+        _floating_ip.FloatingIPCloudMixin.__init__(self)
+        _identity.IdentityCloudMixin.__init__(self)
+        _image.ImageCloudMixin.__init__(self)
+        _network_common.NetworkCommonCloudMixin.__init__(self)
+        _network.NetworkCloudMixin.__init__(self)
+        _object_store.ObjectStoreCloudMixin.__init__(self)
+        _orchestration.OrchestrationCloudMixin.__init__(self)
+        _security_group.SecurityGroupCloudMixin.__init__(self)

     @property
     def session(self):
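The net effect of the hunks above is that the public Connection class now composes the per-service mixins, so the cloud-layer calls from the new modules remain plain Connection methods. A hedged sketch (the cloud name is an assumption):

import openstack

conn = openstack.connect(cloud='example')            # hypothetical clouds.yaml entry

machines = conn.list_machines()                       # BaremetalCloudMixin
stacks = conn.list_stacks()                           # OrchestrationCloudMixin
default_sg = conn.get_security_group('default')       # SecurityGroupCloudMixin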
@ -15,7 +15,6 @@
 # import unittest

 from openstack import connection
-from openstack.cloud.openstackcloud import _OpenStackCloudMixin
 from openstack.tests.functional import base

 HYPERVISORS = []
@ -25,7 +24,7 @@ def hypervisors():
     global HYPERVISORS
     if HYPERVISORS:
         return True
-    HYPERVISORS = _OpenStackCloudMixin.list_hypervisors(
+    HYPERVISORS = connection.Connection.list_hypervisors(
         connection.from_config(cloud_name=base.TEST_CLOUD_NAME))
     return bool(HYPERVISORS)

@ -21,7 +21,7 @@ import uuid

 import mock

-from openstack.cloud import openstackcloud
+from openstack import connection
 from openstack.cloud import exc
 from openstack.cloud import meta
 from openstack.tests import fakes
@ -325,7 +325,7 @@ class TestCreateServer(base.TestCase):

         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, "wait_for_server")
+    @mock.patch.object(connection.Connection, "wait_for_server")
     def test_create_server_with_admin_pass_wait(self, mock_wait):
         """
         Test that a server with an admin_pass passed returns the password
@ -411,9 +411,8 @@ class TestCreateServer(base.TestCase):

         self.assert_calls()

-    @mock.patch.object(
-        openstackcloud._OpenStackCloudMixin, "get_active_server")
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, "get_server")
+    @mock.patch.object(connection.Connection, "get_active_server")
+    @mock.patch.object(connection.Connection, "get_server")
     def test_wait_for_server(self, mock_get_server, mock_get_active_server):
         """
         Test that waiting for a server returns the server instance when
@ -447,7 +446,7 @@ class TestCreateServer(base.TestCase):

         self.assertEqual('ACTIVE', server['status'])

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'wait_for_server')
+    @mock.patch.object(connection.Connection, 'wait_for_server')
     def test_create_server_wait(self, mock_wait):
         """
         Test that create_server with a wait actually does the wait.
@ -484,8 +483,7 @@ class TestCreateServer(base.TestCase):
         )
         self.assert_calls()

-    @mock.patch.object(
-        openstackcloud._OpenStackCloudMixin, 'add_ips_to_server')
+    @mock.patch.object(connection.Connection, 'add_ips_to_server')
     def test_create_server_no_addresses(
             self, mock_add_ips_to_server):
         """
@ -21,17 +21,17 @@ Tests floating IP resource methods for Neutron and Nova-network.

 from mock import patch

+from openstack import connection
 from openstack.cloud import meta
-from openstack.cloud import openstackcloud
 from openstack.tests import fakes
 from openstack.tests.unit import base


 class TestFloatingIP(base.TestCase):

-    @patch.object(openstackcloud._OpenStackCloudMixin, 'get_floating_ip')
-    @patch.object(openstackcloud._OpenStackCloudMixin, '_attach_ip_to_server')
-    @patch.object(openstackcloud._OpenStackCloudMixin, 'available_floating_ip')
+    @patch.object(connection.Connection, 'get_floating_ip')
+    @patch.object(connection.Connection, '_attach_ip_to_server')
+    @patch.object(connection.Connection, 'available_floating_ip')
     def test_add_auto_ip(
             self, mock_available_floating_ip, mock_attach_ip_to_server,
             mock_get_floating_ip):
@ -57,7 +57,7 @@ class TestFloatingIP(base.TestCase):
             timeout=60, wait=False, server=server_dict,
             floating_ip=floating_ip_dict, skip_attach=False)

-    @patch.object(openstackcloud._OpenStackCloudMixin, '_add_ip_from_pool')
+    @patch.object(connection.Connection, '_add_ip_from_pool')
     def test_add_ips_to_server_pool(self, mock_add_ip_from_pool):
         server_dict = fakes.make_fake_server(
             server_id='romeo', name='test-server', status="ACTIVE",
@ -70,9 +70,9 @@ class TestFloatingIP(base.TestCase):
             server_dict, pool, reuse=True, wait=False, timeout=60,
             fixed_address=None, nat_destination=None)

-    @patch.object(openstackcloud._OpenStackCloudMixin, 'has_service')
-    @patch.object(openstackcloud._OpenStackCloudMixin, 'get_floating_ip')
-    @patch.object(openstackcloud._OpenStackCloudMixin, '_add_auto_ip')
+    @patch.object(connection.Connection, 'has_service')
+    @patch.object(connection.Connection, 'get_floating_ip')
+    @patch.object(connection.Connection, '_add_auto_ip')
     def test_add_ips_to_server_ipv6_only(
             self, mock_add_auto_ip,
             mock_get_floating_ip,
@ -109,9 +109,9 @@ class TestFloatingIP(base.TestCase):
         self.assertEqual(
             new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525')

-    @patch.object(openstackcloud._OpenStackCloudMixin, 'has_service')
-    @patch.object(openstackcloud._OpenStackCloudMixin, 'get_floating_ip')
-    @patch.object(openstackcloud._OpenStackCloudMixin, '_add_auto_ip')
+    @patch.object(connection.Connection, 'has_service')
+    @patch.object(connection.Connection, 'get_floating_ip')
+    @patch.object(connection.Connection, '_add_auto_ip')
     def test_add_ips_to_server_rackspace(
             self, mock_add_auto_ip,
             mock_get_floating_ip,
@ -145,9 +145,9 @@ class TestFloatingIP(base.TestCase):
             new_server['interface_ip'],
             '2001:4800:7819:103:be76:4eff:fe05:8525')

-    @patch.object(openstackcloud._OpenStackCloudMixin, 'has_service')
-    @patch.object(openstackcloud._OpenStackCloudMixin, 'get_floating_ip')
-    @patch.object(openstackcloud._OpenStackCloudMixin, '_add_auto_ip')
+    @patch.object(connection.Connection, 'has_service')
+    @patch.object(connection.Connection, 'get_floating_ip')
+    @patch.object(connection.Connection, '_add_auto_ip')
     def test_add_ips_to_server_rackspace_local_ipv4(
             self, mock_add_auto_ip,
             mock_get_floating_ip,
@ -179,7 +179,7 @@ class TestFloatingIP(base.TestCase):
         mock_add_auto_ip.assert_not_called()
         self.assertEqual(new_server['interface_ip'], '104.130.246.91')

-    @patch.object(openstackcloud._OpenStackCloudMixin, 'add_ip_list')
+    @patch.object(connection.Connection, 'add_ip_list')
     def test_add_ips_to_server_ip_list(self, mock_add_ip_list):
         server_dict = fakes.make_fake_server(
             server_id='server-id', name='test-server', status="ACTIVE",
@ -191,8 +191,8 @@ class TestFloatingIP(base.TestCase):
         mock_add_ip_list.assert_called_with(
             server_dict, ips, wait=False, timeout=60, fixed_address=None)

-    @patch.object(openstackcloud._OpenStackCloudMixin, '_needs_floating_ip')
-    @patch.object(openstackcloud._OpenStackCloudMixin, '_add_auto_ip')
+    @patch.object(connection.Connection, '_needs_floating_ip')
+    @patch.object(connection.Connection, '_add_auto_ip')
     def test_add_ips_to_server_auto_ip(
             self, mock_add_auto_ip, mock_needs_floating_ip):
         server_dict = fakes.make_fake_server(
@ -14,7 +14,7 @@

 import mock

-from openstack.cloud import openstackcloud
+from openstack import connection
 from openstack.cloud import meta
 from openstack.tests import fakes
 from openstack.tests.unit import base
@ -353,10 +353,10 @@ class TestMeta(base.TestCase):
             '10.0.0.101', meta.get_server_private_ip(srv, self.cloud))
         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'has_service')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_volumes')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_image_name')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_flavor_name')
+    @mock.patch.object(connection.Connection, 'has_service')
+    @mock.patch.object(connection.Connection, 'get_volumes')
+    @mock.patch.object(connection.Connection, 'get_image_name')
+    @mock.patch.object(connection.Connection, 'get_flavor_name')
     def test_get_server_private_ip_devstack(
             self,
             mock_get_flavor_name, mock_get_image_name,
@ -418,9 +418,9 @@ class TestMeta(base.TestCase):
         self.assertEqual(PRIVATE_V4, srv['private_v4'])
         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_volumes')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_image_name')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_flavor_name')
+    @mock.patch.object(connection.Connection, 'get_volumes')
+    @mock.patch.object(connection.Connection, 'get_image_name')
+    @mock.patch.object(connection.Connection, 'get_flavor_name')
     def test_get_server_private_ip_no_fip(
             self,
             mock_get_flavor_name, mock_get_image_name,
@ -468,9 +468,9 @@ class TestMeta(base.TestCase):
         self.assertEqual(PRIVATE_V4, srv['private_v4'])
         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_volumes')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_image_name')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_flavor_name')
+    @mock.patch.object(connection.Connection, 'get_volumes')
+    @mock.patch.object(connection.Connection, 'get_image_name')
+    @mock.patch.object(connection.Connection, 'get_flavor_name')
     def test_get_server_cloud_no_fips(
             self,
             mock_get_flavor_name, mock_get_image_name,
@ -516,10 +516,10 @@ class TestMeta(base.TestCase):
         self.assertEqual(PRIVATE_V4, srv['private_v4'])
         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'has_service')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_volumes')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_image_name')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_flavor_name')
+    @mock.patch.object(connection.Connection, 'has_service')
+    @mock.patch.object(connection.Connection, 'get_volumes')
+    @mock.patch.object(connection.Connection, 'get_image_name')
+    @mock.patch.object(connection.Connection, 'get_flavor_name')
     def test_get_server_cloud_missing_fips(
             self,
             mock_get_flavor_name, mock_get_image_name,
@ -585,9 +585,9 @@ class TestMeta(base.TestCase):
         self.assertEqual(PUBLIC_V4, srv['public_v4'])
         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_volumes')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_image_name')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_flavor_name')
+    @mock.patch.object(connection.Connection, 'get_volumes')
+    @mock.patch.object(connection.Connection, 'get_image_name')
+    @mock.patch.object(connection.Connection, 'get_flavor_name')
     def test_get_server_cloud_rackspace_v6(
             self, mock_get_flavor_name, mock_get_image_name,
             mock_get_volumes):
@ -635,9 +635,9 @@ class TestMeta(base.TestCase):
             "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip'])
         self.assert_calls()

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_volumes')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_image_name')
-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'get_flavor_name')
+    @mock.patch.object(connection.Connection, 'get_volumes')
+    @mock.patch.object(connection.Connection, 'get_image_name')
+    @mock.patch.object(connection.Connection, 'get_flavor_name')
     def test_get_server_cloud_osic_split(
             self, mock_get_flavor_name, mock_get_image_name,
             mock_get_volumes):
@ -15,7 +15,6 @@ import uuid

 import testtools

-from openstack.cloud import openstackcloud
 from openstack.cloud import exc
 from openstack import connection
 from openstack.tests import fakes
@ -63,7 +62,7 @@ class TestShade(base.TestCase):
         # keystoneauth1.loading.base.BaseLoader.load_from_options
         self.cloud.connect_as(project_name='test_project')

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'search_images')
+    @mock.patch.object(connection.Connection, 'search_images')
     def test_get_images(self, mock_search):
         image1 = dict(id='123', name='mickey')
         mock_search.return_value = [image1]
@ -71,7 +70,7 @@ class TestShade(base.TestCase):
         self.assertIsNotNone(r)
         self.assertDictEqual(image1, r)

-    @mock.patch.object(openstackcloud._OpenStackCloudMixin, 'search_images')
+    @mock.patch.object(connection.Connection, 'search_images')
     def test_get_image_not_found(self, mock_search):
         mock_search.return_value = []
         r = self.cloud.get_image('doesNotExist')
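The recurring pattern in these test changes is that mocks now target the public connection.Connection class rather than the old private mixin. A minimal hedged sketch of that pattern (the test class and method names are illustrative, not part of this change):

import mock

from openstack import connection
from openstack.tests.unit import base


class TestExample(base.TestCase):

    @mock.patch.object(connection.Connection, 'search_images')
    def test_find_image(self, mock_search):
        # The patched method lives on Connection now that the cloud layer
        # is composed from per-service mixins.
        mock_search.return_value = [dict(id='123', name='mickey')]
        image = self.cloud.get_image('mickey')
        self.assertEqual('123', image['id'])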