Completely switch to openstacksdk

Change-Id: I1729797fa03095d200c7334281915abc284b5732
Dmitry Tantsur 2018-11-22 13:35:10 +01:00
parent 6de505be69
commit eee74d31b8
18 changed files with 1003 additions and 1445 deletions
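
The gist of the change: every call that previously went through python-ironicclient (see the deleted _os_api module below) now goes through the baremetal proxy of the single openstacksdk Connection that metalsmith already holds. A minimal sketch of the new pattern, assuming a cloud named "mycloud" is configured in clouds.yaml:

    import openstack

    # One Connection now serves both the bare metal and the network APIs.
    conn = openstack.connect(cloud='mycloud')

    # Before: ir_client.get_client('1', session=...).node.list(...)
    # After: the baremetal proxy on the same Connection.
    for node in conn.baremetal.nodes(details=True, provision_state='available'):
        print(node.id, node.name)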

View File

@@ -4,9 +4,8 @@ fixtures==3.0.0
flake8-import-order==0.13
hacking==1.0.0
mock==2.0
openstacksdk==0.17.0
openstacksdk==0.22.0
pbr==2.0.0
python-ironicclient==1.14.0
Pygments==2.2.0
requests==2.18.4
six==1.10.0

View File

@@ -13,11 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import os
import shutil
import tempfile
import logging
from openstack.baremetal import configdrive
from metalsmith import _utils
LOG = logging.getLogger(__name__)
class InstanceConfig(object):
@@ -56,13 +60,12 @@ class InstanceConfig(object):
kwargs.setdefault('ssh_authorized_keys', self.ssh_keys)
self.users.append(kwargs)
@contextlib.contextmanager
def build_configdrive_directory(self, node, hostname):
"""Build a configdrive from the provided information.
def build_configdrive(self, node, hostname):
"""Make the config drive.
:param node: `Node` object.
:param hostname: instance hostname.
:return: a context manager yielding a directory with files
:return: configdrive contents as a base64-encoded string.
"""
# NOTE(dtantsur): CirrOS does not understand lists
if isinstance(self.ssh_keys, list):
@@ -70,33 +73,25 @@ class InstanceConfig(object):
else:
ssh_keys = self.ssh_keys
d = tempfile.mkdtemp()
try:
metadata = {'public_keys': ssh_keys,
'uuid': node.uuid,
'name': node.name,
'hostname': hostname,
'launch_index': 0,
'availability_zone': '',
'files': [],
'meta': {}}
user_data = {}
if self.users:
user_data['users'] = self.users
metadata = {'public_keys': ssh_keys,
'uuid': node.id,
'name': node.name,
'hostname': hostname,
'launch_index': 0,
'availability_zone': '',
'files': [],
'meta': {}}
user_data = {}
user_data_bin = None
for version in ('2012-08-10', 'latest'):
subdir = os.path.join(d, 'openstack', version)
if not os.path.exists(subdir):
os.makedirs(subdir)
if self.users:
user_data['users'] = self.users
with open(os.path.join(subdir, 'meta_data.json'), 'w') as fp:
json.dump(metadata, fp)
if user_data:
user_data_bin = ("#cloud-config\n" + json.dumps(user_data)).encode(
'utf-8')
if user_data:
with open(os.path.join(subdir, 'user_data'), 'w') as fp:
fp.write("#cloud-config\n")
json.dump(user_data, fp)
yield d
finally:
shutil.rmtree(d)
LOG.debug('Generating configdrive tree for node %(node)s with '
'metadata %(meta)s', {'node': _utils.log_res(node),
'meta': metadata})
return configdrive.build(metadata, user_data_bin)
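
The SDK helper used here replaces the whole hand-rolled directory tree: openstack.baremetal.configdrive.build takes the metadata dict plus optional user data bytes and returns the finished config drive as a base64-encoded string. A small usage sketch (all node values below are made up):

    from openstack.baremetal import configdrive

    metadata = {'uuid': 'abcd-1234', 'name': 'node-1',
                'hostname': 'node-1.example.com',
                'public_keys': {'default': 'ssh-rsa AAAA...'},
                'launch_index': 0, 'availability_zone': '',
                'files': [], 'meta': {}}
    user_data = ('#cloud-config\n{"users": [{"name": "deployer"}]}'
                 .encode('utf-8'))

    # Builds the ISO image, gzips it and base64-encodes the result.
    cd = configdrive.build(metadata, user_data)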

View File

@@ -52,12 +52,12 @@ class DefaultFormat(object):
else:
message = "Unprovisioning started for node %(node)s"
_print(message, node=_utils.log_node(node))
_print(message, node=_utils.log_res(node))
def show(self, instances):
for instance in instances:
_print("Node %(node)s, current state is %(state)s",
node=_utils.log_node(instance.node), state=instance.state)
node=_utils.log_res(instance.node), state=instance.state)
if instance.is_deployed:
ips = instance.ip_addresses()

View File

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from metalsmith import _os_api
from metalsmith import _utils
_PROGRESS_STATES = frozenset(['deploying', 'wait call-back',
@@ -30,15 +30,15 @@ _HEALTHY_STATES = _PROGRESS_STATES | _ACTIVE_STATES
class Instance(object):
"""Instance status in metalsmith."""
def __init__(self, api, node):
self._api = api
self._uuid = node.uuid
def __init__(self, connection, node):
self._connection = connection
self._uuid = node.id
self._node = node
@property
def hostname(self):
"""Node's hostname."""
return self._node.instance_info.get(_os_api.HOSTNAME_FIELD)
return self._node.instance_info.get(_utils.GetNodeMixin.HOSTNAME_FIELD)
def ip_addresses(self):
"""Returns IP addresses for this instance.
@@ -61,12 +61,12 @@ class Instance(object):
@property
def _is_deployed_by_metalsmith(self):
return _os_api.HOSTNAME_FIELD in self._node.instance_info
return _utils.GetNodeMixin.HOSTNAME_FIELD in self._node.instance_info
@property
def is_healthy(self):
"""Whether the node is not at fault or maintenance."""
return self.state in _HEALTHY_STATES and not self._node.maintenance
return self.state in _HEALTHY_STATES and not self._node.is_maintenance
def nics(self):
"""List NICs for this instance.
@@ -75,10 +75,10 @@
with full representations of their networks.
"""
result = []
vifs = self._api.list_node_attached_ports(self.node)
vifs = self._connection.baremetal.list_node_vifs(self.node)
for vif in vifs:
port = self._api.connection.network.get_port(vif.id)
port.network = self._api.connection.network.get_network(
port = self._connection.network.get_port(vif)
port.network = self._connection.network.get_network(
port.network_id)
result.append(port)
return result
@@ -110,7 +110,7 @@ class Instance(object):
elif prov_state in _ERROR_STATES:
return 'error'
elif prov_state in _ACTIVE_STATES:
if self._node.maintenance:
if self._node.is_maintenance:
return 'maintenance'
else:
return 'active'
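
Note the subtle API difference in nics(): the SDK's list_node_vifs returns plain VIF IDs (Neutron port UUIDs) rather than objects, hence get_port(vif) instead of the former get_port(vif.id). The same lookup as a standalone sketch, assuming an existing conn and node:

    # Each VIF ID is a plain string naming an attached Neutron port.
    for vif_id in conn.baremetal.list_node_vifs(node):
        port = conn.network.get_port(vif_id)
        network = conn.network.get_network(port.network_id)
        print(network.name, [ip['ip_address'] for ip in port.fixed_ips])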

View File

@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
class NICs(object):
"""Requested NICs."""
def __init__(self, api, node, nics):
def __init__(self, connection, node, nics):
if nics is None:
nics = []
@@ -38,7 +38,7 @@ class NICs(object):
raise TypeError("Each NIC must be a dict got %s" % nic)
self._node = node
self._api = api
self._connection = connection
self._nics = nics
self._validated = None
self.created_ports = []
@@ -68,25 +68,26 @@ class NICs(object):
for nic_type, nic in self._validated:
if nic_type == 'network':
port = self._api.connection.network.create_port(**nic)
port = self._connection.network.create_port(**nic)
self.created_ports.append(port.id)
LOG.info('Created port %(port)s for node %(node)s with '
'%(nic)s', {'port': _utils.log_res(port),
'node': _utils.log_node(self._node),
'node': _utils.log_res(self._node),
'nic': nic})
else:
port = nic
self._api.attach_port_to_node(self._node.uuid, port.id)
self._connection.baremetal.attach_vif_to_node(self._node,
port.id)
LOG.info('Attached port %(port)s to node %(node)s',
{'port': _utils.log_res(port),
'node': _utils.log_node(self._node)})
'node': _utils.log_res(self._node)})
self.attached_ports.append(port.id)
def detach_and_delete_ports(self):
"""Detach attached port and delete previously created ones."""
detach_and_delete_ports(self._api, self._node, self.created_ports,
self.attached_ports)
detach_and_delete_ports(self._connection, self._node,
self.created_ports, self.attached_ports)
def _get_port(self, nic):
"""Validate and get the NIC information for a port.
@@ -100,7 +101,7 @@ class NICs(object):
'Unexpected fields for a port: %s' % ', '.join(unexpected))
try:
port = self._api.connection.network.find_port(
port = self._connection.network.find_port(
nic['port'], ignore_missing=False)
except Exception as exc:
raise exceptions.InvalidNIC(
@@ -122,7 +123,7 @@ class NICs(object):
'Unexpected fields for a network: %s' % ', '.join(unexpected))
try:
network = self._api.connection.network.find_network(
network = self._connection.network.find_network(
nic['network'], ignore_missing=False)
except Exception as exc:
raise exceptions.InvalidNIC(
@@ -136,33 +137,32 @@ class NICs(object):
return port_args
def detach_and_delete_ports(api, node, created_ports, attached_ports):
def detach_and_delete_ports(connection, node, created_ports, attached_ports):
"""Detach attached port and delete previously created ones.
:param api: `Api` instance.
:param connection: `openstacksdk.Connection` instance.
:param node: `Node` object to detach ports from.
:param created_ports: List of IDs of previously created ports.
:param attached_ports: List of IDs of previously attached ports.
"""
for port_id in set(attached_ports + created_ports):
LOG.debug('Detaching port %(port)s from node %(node)s',
{'port': port_id, 'node': node.uuid})
{'port': port_id, 'node': _utils.log_res(node)})
try:
api.detach_port_from_node(node, port_id)
connection.baremetal.detach_vif_from_node(node, port_id)
except Exception as exc:
LOG.debug('Failed to remove VIF %(vif)s from node %(node)s, '
'assuming already removed: %(exc)s',
{'vif': port_id, 'node': _utils.log_node(node),
{'vif': port_id, 'node': _utils.log_res(node),
'exc': exc})
for port_id in created_ports:
LOG.debug('Deleting port %s', port_id)
try:
api.connection.network.delete_port(port_id,
ignore_missing=False)
connection.network.delete_port(port_id, ignore_missing=False)
except Exception as exc:
LOG.warning('Failed to delete neutron port %(port)s: %(exc)s',
{'port': port_id, 'exc': exc})
else:
LOG.info('Deleted port %(port)s for node %(node)s',
{'port': port_id, 'node': _utils.log_node(node)})
{'port': port_id, 'node': _utils.log_res(node)})
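
Callers only see the signature change: the module-level helper now takes the Connection instead of the old wrapper. A usage sketch, assuming conn, node and the two port ID lists recorded at deploy time:

    from metalsmith import _nics

    # Detaches every VIF, then deletes only the ports metalsmith created.
    _nics.detach_and_delete_ports(conn, node, created_ports, attached_ports)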

View File

@@ -1,182 +0,0 @@
# Copyright 2015-2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
from ironicclient import client as ir_client
import six
from metalsmith import _utils
LOG = logging.getLogger(__name__)
HOSTNAME_FIELD = 'metalsmith_hostname'
class _Remove(object):
"""Indicator that a field should be removed."""
__slots__ = ()
def __repr__(self):
"""Allow nicer logging."""
return '<REMOVE>'
REMOVE = _Remove()
class DictWithAttrs(dict):
__slots__ = ()
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
super(DictWithAttrs, self).__getattr__(attr)
class API(object):
"""Various OpenStack API's."""
IRONIC_VERSION = '1'
# TODO(dtantsur): use openstacksdk and stop hardcoding this here.
# 1.46 (Rocky) adds conductor_group.
IRONIC_MICRO_VERSION = '1.46'
_node_list = None
def __init__(self, session, connection):
self.ironic = ir_client.get_client(
self.IRONIC_VERSION, session=session,
os_ironic_api_version=self.IRONIC_MICRO_VERSION)
self.connection = connection
def _nodes_for_lookup(self):
return self.list_nodes(maintenance=None,
associated=None,
provision_state=None,
fields=['uuid', 'name', 'instance_info'])
def attach_port_to_node(self, node, port_id):
self.ironic.node.vif_attach(_node_id(node), port_id)
@contextlib.contextmanager
def cache_node_list_for_lookup(self):
if self._node_list is None:
self._node_list = self._nodes_for_lookup()
yield self._node_list
self._node_list = None
def detach_port_from_node(self, node, port_id):
self.ironic.node.vif_detach(_node_id(node), port_id)
def find_node_by_hostname(self, hostname):
nodes = self._node_list or self._nodes_for_lookup()
existing = [n for n in nodes
if n.instance_info.get(HOSTNAME_FIELD) == hostname]
if len(existing) > 1:
raise RuntimeError("More than one node found with hostname "
"%(host)s: %(nodes)s" %
{'host': hostname,
'nodes': ', '.join(_utils.log_node(n)
for n in existing)})
elif not existing:
return None
else:
# Fetch the complete node record
return self.get_node(existing[0].uuid, accept_hostname=False)
def get_node(self, node, refresh=False, accept_hostname=False):
if isinstance(node, six.string_types):
if accept_hostname and _utils.is_hostname_safe(node):
by_hostname = self.find_node_by_hostname(node)
if by_hostname is not None:
return by_hostname
return self.ironic.node.get(node)
elif hasattr(node, 'node'):
# Instance object
node = node.node
else:
node = node
if refresh:
return self.ironic.node.get(node.uuid)
else:
return node
def list_node_attached_ports(self, node):
return self.ironic.node.vif_list(_node_id(node))
def list_node_ports(self, node):
return self.ironic.node.list_ports(_node_id(node), limit=0)
def list_nodes(self, maintenance=False, associated=False,
provision_state='available', **filters):
if 'fields' not in filters:
filters['detail'] = True
return self.ironic.node.list(limit=0, maintenance=maintenance,
associated=associated,
provision_state=provision_state,
**filters)
def node_action(self, node, action, **kwargs):
self.ironic.node.set_provision_state(_node_id(node), action, **kwargs)
def release_node(self, node):
return self.update_node(_node_id(node), instance_uuid=REMOVE)
def reserve_node(self, node, instance_uuid):
return self.update_node(_node_id(node), instance_uuid=instance_uuid)
def update_node(self, node, *args, **attrs):
if args:
attrs.update(args[0])
patches = _convert_patches(attrs)
return self.ironic.node.update(_node_id(node), patches)
def validate_node(self, node, validate_deploy=False):
ifaces = ['power', 'management']
if validate_deploy:
ifaces += ['deploy']
validation = self.ironic.node.validate(_node_id(node))
for iface in ifaces:
result = getattr(validation, iface)
if not result['result']:
raise RuntimeError('%s: %s' % (iface, result['reason']))
def _node_id(node):
if isinstance(node, six.string_types):
return node
else:
return node.uuid
def _convert_patches(attrs):
patches = []
for key, value in attrs.items():
if not key.startswith('/'):
key = '/' + key
if value is REMOVE:
patches.append({'op': 'remove', 'path': key})
else:
patches.append({'op': 'add', 'path': key, 'value': value})
return patches
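
With this module gone, the JSON-patch plumbing (_convert_patches and the REMOVE sentinel) disappears entirely: the SDK's update_node accepts plain keyword arguments and computes the patch internally, and clearing a field is just passing None. An illustration of the mapping:

    # Before (ironicclient wrapper):
    #     api.update_node(node, instance_uuid=REMOVE)
    # After (openstacksdk):
    node = conn.baremetal.update_node(node, instance_id=None)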

View File

@@ -16,7 +16,6 @@
import logging
import random
import sys
import time
import warnings
from openstack import connection
@@ -25,7 +24,6 @@ import six
from metalsmith import _config
from metalsmith import _instance
from metalsmith import _nics
from metalsmith import _os_api
from metalsmith import _scheduler
from metalsmith import _utils
from metalsmith import exceptions
@@ -38,7 +36,7 @@ _CREATED_PORTS = 'metalsmith_created_ports'
_ATTACHED_PORTS = 'metalsmith_attached_ports'
class Provisioner(object):
class Provisioner(_utils.GetNodeMixin):
"""API to deploy/undeploy nodes with OpenStack.
:param session: `Session` object (from ``keystoneauth``) to use when
@@ -63,11 +61,7 @@ class Provisioner(object):
'but not both')
else:
self.connection = connection.Connection(config=cloud_region)
# NOTE(dtantsur): Connection.baremetal is a keystoneauth Adapter
# for baremetal API.
session = self.connection.baremetal
self._api = _os_api.API(session, self.connection)
self._dry_run = dry_run
def reserve_node(self, resource_class=None, conductor_group=None,
@@ -103,13 +97,15 @@ class Provisioner(object):
DeprecationWarning)
if candidates:
nodes = [self._api.get_node(node) for node in candidates]
nodes = [self._get_node(node) for node in candidates]
filters = [
_scheduler.NodeTypeFilter(resource_class, conductor_group),
]
else:
nodes = self._api.list_nodes(resource_class=resource_class,
conductor_group=conductor_group)
nodes = list(self.connection.baremetal.nodes(
resource_class=resource_class,
conductor_group=conductor_group,
details=True))
if not nodes:
raise exceptions.NodesNotFound(resource_class, conductor_group)
# Ensure parallel executions don't try nodes in the same sequence
@@ -124,18 +120,16 @@ class Provisioner(object):
if predicate is not None:
filters.append(_scheduler.CustomPredicateFilter(predicate))
reserver = _scheduler.IronicReserver(self._api)
instance_info = {}
if capabilities:
instance_info['capabilities'] = capabilities
if traits:
instance_info['traits'] = traits
reserver = _scheduler.IronicReserver(self.connection,
instance_info)
node = _scheduler.schedule_node(nodes, filters, reserver,
dry_run=self._dry_run)
update = {}
if capabilities:
update['/instance_info/capabilities'] = capabilities
if traits:
update['/instance_info/traits'] = traits
if update:
node = self._api.update_node(node, update)
LOG.debug('Reserved node: %s', node)
return node
@@ -148,28 +142,29 @@ class Provisioner(object):
reserved by us or are in maintenance mode.
"""
try:
node = self._api.get_node(node)
node = self._get_node(node)
except Exception as exc:
raise exceptions.InvalidNode('Cannot find node %(node)s: %(exc)s' %
{'node': node, 'exc': exc})
if not node.instance_uuid:
if not node.instance_id:
if not self._dry_run:
LOG.debug('Node %s not reserved yet, reserving',
_utils.log_node(node))
self._api.reserve_node(node, instance_uuid=node.uuid)
elif node.instance_uuid != node.uuid:
_utils.log_res(node))
self.connection.baremetal.update_node(
node, instance_id=node.id)
elif node.instance_id != node.id:
raise exceptions.InvalidNode('Node %(node)s already reserved '
'by instance %(inst)s outside of '
'metalsmith, cannot deploy on it' %
{'node': _utils.log_node(node),
'inst': node.instance_uuid})
{'node': _utils.log_res(node),
'inst': node.instance_id})
if node.maintenance:
if node.is_maintenance:
raise exceptions.InvalidNode('Refusing to deploy on node %(node)s '
'which is in maintenance mode due to '
'%(reason)s' %
{'node': _utils.log_node(node),
{'node': _utils.log_res(node),
'reason': node.maintenance_reason})
return node
@@ -187,17 +182,17 @@ class Provisioner(object):
if node.name and _utils.is_hostname_safe(node.name):
return node.name
else:
return node.uuid
return node.id
if not _utils.is_hostname_safe(hostname):
raise ValueError("%s cannot be used as a hostname" % hostname)
existing = self._api.find_node_by_hostname(hostname)
if existing is not None and existing.uuid != node.uuid:
existing = self._find_node_by_hostname(hostname)
if existing is not None and existing.id != node.id:
raise ValueError("The following node already uses hostname "
"%(host)s: %(node)s" %
{'host': hostname,
'node': _utils.log_node(existing)})
'node': _utils.log_res(existing)})
return hostname
@@ -256,7 +251,7 @@ class Provisioner(object):
image = sources.GlanceImage(image)
node = self._check_node_for_deploy(node)
nics = _nics.NICs(self._api, node, nics)
nics = _nics.NICs(self.connection, node, nics)
try:
hostname = self._check_hostname(node, hostname)
@@ -271,62 +266,71 @@ class Provisioner(object):
if self._dry_run:
LOG.warning('Dry run, not provisioning node %s',
_utils.log_node(node))
_utils.log_res(node))
return node
nics.create_and_attach_ports()
capabilities['boot_option'] = 'netboot' if netboot else 'local'
updates = {'/instance_info/root_gb': root_size_gb,
'/instance_info/capabilities': capabilities,
'/extra/%s' % _CREATED_PORTS: nics.created_ports,
'/extra/%s' % _ATTACHED_PORTS: nics.attached_ports,
'/instance_info/%s' % _os_api.HOSTNAME_FIELD: hostname}
updates.update(image._node_updates(self.connection))
instance_info = node.instance_info.copy()
instance_info['root_gb'] = root_size_gb
instance_info['capabilities'] = capabilities
instance_info[self.HOSTNAME_FIELD] = hostname
extra = node.extra.copy()
extra[_CREATED_PORTS] = nics.created_ports
extra[_ATTACHED_PORTS] = nics.attached_ports
instance_info.update(image._node_updates(self.connection))
if traits is not None:
updates['/instance_info/traits'] = traits
instance_info['traits'] = traits
if swap_size_mb is not None:
updates['/instance_info/swap_mb'] = swap_size_mb
instance_info['swap_mb'] = swap_size_mb
LOG.debug('Updating node %(node)s with %(updates)s',
{'node': _utils.log_node(node), 'updates': updates})
node = self._api.update_node(node, updates)
self._api.validate_node(node, validate_deploy=True)
LOG.debug('Updating node %(node)s with instance info %(iinfo)s '
'and extras %(extra)s', {'node': _utils.log_res(node),
'iinfo': instance_info,
'extra': extra})
node = self.connection.baremetal.update_node(
node, instance_info=instance_info, extra=extra)
self.connection.baremetal.validate_node(node)
LOG.debug('Generating a configdrive for node %s',
_utils.log_node(node))
with config.build_configdrive_directory(node, hostname) as cd:
self._api.node_action(node, 'active',
configdrive=cd)
_utils.log_res(node))
cd = config.build_configdrive(node, hostname)
# TODO(dtantsur): move this to openstacksdk?
if not isinstance(cd, six.string_types):
cd = cd.decode('utf-8')
LOG.debug('Starting provisioning of node %s', _utils.log_res(node))
self.connection.baremetal.set_node_provision_state(
node, 'active', config_drive=cd)
except Exception:
exc_info = sys.exc_info()
try:
LOG.error('Deploy attempt failed on node %s, cleaning up',
_utils.log_node(node))
_utils.log_res(node))
self._clean_up(node, nics=nics)
except Exception:
LOG.exception('Clean up failed')
six.reraise(*exc_info)
LOG.info('Provisioning started on node %s', _utils.log_node(node))
LOG.info('Provisioning started on node %s', _utils.log_res(node))
if wait is not None:
LOG.debug('Waiting for node %(node)s to reach state active '
'with timeout %(timeout)s',
{'node': _utils.log_node(node), 'timeout': wait})
{'node': _utils.log_res(node), 'timeout': wait})
instance = self.wait_for_provisioning([node], timeout=wait)[0]
LOG.info('Deploy succeeded on node %s', _utils.log_node(node))
LOG.info('Deploy succeeded on node %s', _utils.log_res(node))
else:
# Update the node to return its latest state
node = self._api.get_node(node, refresh=True)
instance = _instance.Instance(self._api, node)
node = self._get_node(node, refresh=True)
instance = _instance.Instance(self.connection, node)
return instance
def wait_for_provisioning(self, nodes, timeout=None, delay=15):
def wait_for_provisioning(self, nodes, timeout=None, delay=None):
"""Wait for nodes to be provisioned.
Loops until all nodes finish provisioning.
@@ -336,96 +340,46 @@ class Provisioner(object):
:param timeout: How much time (in seconds) to wait for all nodes
to finish provisioning. If ``None`` (the default), wait forever
(more precisely, until the operation times out on server side).
:param delay: Delay (in seconds) between two provision state checks.
:param delay: DEPRECATED, do not use.
:return: List of updated :py:class:`metalsmith.Instance` objects if
all succeeded.
:raises: :py:class:`metalsmith.exceptions.DeploymentFailure`
if the deployment failed or timed out for any nodes.
"""
nodes = self._wait_for_state(nodes, 'active',
timeout=timeout, delay=delay)
return [_instance.Instance(self._api, node) for node in nodes]
def _wait_for_state(self, nodes, state, timeout, delay=15):
if timeout is not None and timeout <= 0:
raise ValueError("The timeout argument must be a positive int")
if delay < 0:
raise ValueError("The delay argument must be a non-negative int")
failed_nodes = []
finished_nodes = []
deadline = time.time() + timeout if timeout is not None else None
while timeout is None or time.time() < deadline:
remaining_nodes = []
for node in nodes:
node = self._api.get_node(node, refresh=True,
accept_hostname=True)
if node.provision_state == state:
LOG.debug('Node %(node)s reached state %(state)s',
{'node': _utils.log_node(node), 'state': state})
finished_nodes.append(node)
elif (node.provision_state == 'error' or
node.provision_state.endswith(' failed')):
LOG.error('Node %(node)s failed deployment: %(error)s',
{'node': _utils.log_node(node),
'error': node.last_error})
failed_nodes.append(node)
else:
remaining_nodes.append(node)
if remaining_nodes:
nodes = remaining_nodes
else:
nodes = []
break
LOG.debug('Still waiting for the following nodes to reach state '
'%(state)s: %(nodes)s',
{'state': state,
'nodes': ', '.join(_utils.log_node(n) for n in nodes)})
time.sleep(delay)
messages = []
if failed_nodes:
messages.append('the following nodes failed deployment: %s' %
', '.join('%s (%s)' % (_utils.log_node(node),
node.last_error)
for node in failed_nodes))
if nodes:
messages.append('deployment timed out for nodes %s' %
', '.join(_utils.log_node(node) for node in nodes))
if messages:
raise exceptions.DeploymentFailure(
'Deployment failed: %s' % '; '.join(messages),
failed_nodes + nodes)
else:
LOG.debug('All nodes reached state %s', state)
return finished_nodes
if delay is not None:
warnings.warn("The delay argument to wait_for_provisioning is "
"deprecated and has not effect", DeprecationWarning)
nodes = [self._get_node(n, accept_hostname=True) for n in nodes]
nodes = self.connection.baremetal.wait_for_nodes_provision_state(
nodes, 'active', timeout=timeout)
return [_instance.Instance(self.connection, node) for node in nodes]
def _clean_up(self, node, nics=None):
if nics is None:
created_ports = node.extra.get(_CREATED_PORTS, [])
attached_ports = node.extra.get(_ATTACHED_PORTS, [])
_nics.detach_and_delete_ports(self._api, node, created_ports,
attached_ports)
_nics.detach_and_delete_ports(self.connection, node,
created_ports, attached_ports)
else:
nics.detach_and_delete_ports()
update = {'/extra/%s' % item: _os_api.REMOVE
for item in (_CREATED_PORTS, _ATTACHED_PORTS)}
update['/instance_info/%s' % _os_api.HOSTNAME_FIELD] = _os_api.REMOVE
LOG.debug('Updating node %(node)s with %(updates)s',
{'node': _utils.log_node(node), 'updates': update})
extra = node.extra.copy()
for item in (_CREATED_PORTS, _ATTACHED_PORTS):
extra.pop(item, None)
instance_info = node.instance_info.copy()
instance_info.pop(self.HOSTNAME_FIELD, None)
LOG.debug('Updating node %(node)s with instance info %(iinfo)s '
'and extras %(extra)s and releasing the lock',
{'node': _utils.log_res(node),
'iinfo': instance_info,
'extra': extra})
try:
self._api.update_node(node, update)
self.connection.baremetal.update_node(
node, instance_info=instance_info, extra=extra,
instance_id=None)
except Exception as exc:
LOG.debug('Failed to clear node %(node)s extra: %(exc)s',
{'node': _utils.log_node(node), 'exc': exc})
LOG.debug('Releasing lock on node %s', _utils.log_node(node))
self._api.release_node(node)
{'node': _utils.log_res(node), 'exc': exc})
def unprovision_node(self, node, wait=None):
"""Unprovision a previously provisioned node.
@@ -436,21 +390,23 @@ class Provisioner(object):
None to return immediately.
:return: the latest `Node` object.
"""
node = self._api.get_node(node, accept_hostname=True)
node = self._get_node(node, accept_hostname=True)
if self._dry_run:
LOG.warning("Dry run, not unprovisioning")
return
self._clean_up(node)
self._api.node_action(node, 'deleted')
node = self.connection.baremetal.set_node_provision_state(
node, 'deleted', wait=False)
LOG.info('Deleting started for node %s', _utils.log_node(node))
LOG.info('Deleting started for node %s', _utils.log_res(node))
if wait is not None:
self._wait_for_state([node], 'available', timeout=wait)
LOG.info('Node %s undeployed successfully', _utils.log_node(node))
node = self.connection.baremetal.wait_for_nodes_provision_state(
[node], 'available', timeout=wait)[0]
LOG.info('Node %s undeployed successfully', _utils.log_res(node))
return self._api.get_node(node, refresh=True)
return node
def show_instance(self, instance_id):
"""Show information about instance.
@@ -470,11 +426,11 @@ class Provisioner(object):
:return: list of :py:class:`metalsmith.Instance` objects in the same
order as ``instances``.
"""
with self._api.cache_node_list_for_lookup():
with self._cache_node_list_for_lookup():
return [
_instance.Instance(
self._api,
self._api.get_node(inst, accept_hostname=True))
self.connection,
self._get_node(inst, accept_hostname=True))
for inst in instances
]
@@ -483,8 +439,9 @@ class Provisioner(object):
:return: list of :py:class:`metalsmith.Instance` objects.
"""
nodes = self._api.list_nodes(provision_state=None, associated=True)
nodes = self.connection.baremetal.nodes(associated=True, details=True)
instances = [i for i in
(_instance.Instance(self._api, node) for node in nodes)
(_instance.Instance(self.connection, node)
for node in nodes)
if i._is_deployed_by_metalsmith]
return instances
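
Taken together, the provisioner's Ironic interactions now map one-to-one onto SDK calls. A condensed sketch of the deploy path, assuming conn, a reserved node, prepared instance_info/extra dicts and a base64 config drive cd:

    # instance_info and extra are written as whole dicts; the SDK
    # turns them into a JSON patch behind the scenes.
    node = conn.baremetal.update_node(node, instance_info=instance_info,
                                      extra=extra)

    # Raises if the node's interfaces do not pass validation.
    conn.baremetal.validate_node(node)

    conn.baremetal.set_node_provision_state(node, 'active', config_drive=cd)

    # Replaces the hand-written polling loop, including error and
    # timeout handling (hence the deprecated delay argument).
    conn.baremetal.wait_for_nodes_provision_state([node], 'active',
                                                  timeout=3600)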

View File

@@ -17,6 +17,7 @@ import abc
import collections
import logging
from openstack import exceptions as sdk_exc
import six
from metalsmith import _utils
@@ -100,13 +101,13 @@ def schedule_node(nodes, filters, reserver, dry_run=False):
for node in nodes:
try:
result = reserver(node)
except Exception as exc:
except sdk_exc.SDKException as exc:
LOG.debug('Node %(node)s was not reserved (%(exc)s), moving on '
'to the next one',
{'node': _utils.log_node(node), 'exc': exc})
{'node': _utils.log_res(node), 'exc': exc})
else:
LOG.info('Node %s reserved for deployment',
_utils.log_node(result))
_utils.log_res(result))
return result
LOG.debug('No nodes could be reserved')
@@ -149,25 +150,25 @@ class CapabilitiesFilter(Filter):
caps = _utils.get_capabilities(node)
except Exception:
LOG.exception('Malformed capabilities on node %(node)s: %(caps)s',
{'node': _utils.log_node(node),
{'node': _utils.log_res(node),
'caps': node.properties.get('capabilities')})
return False
LOG.debug('Capabilities for node %(node)s: %(caps)s',
{'node': _utils.log_node(node), 'caps': caps})
{'node': _utils.log_res(node), 'caps': caps})
for key, value in self._capabilities.items():
try:
node_value = caps[key]
except KeyError:
LOG.debug('Node %(node)s does not have capability %(cap)s',
{'node': _utils.log_node(node), 'cap': key})
{'node': _utils.log_res(node), 'cap': key})
return False
else:
self._counter["%s=%s" % (key, node_value)] += 1
if value != node_value:
LOG.debug('Node %(node)s has capability %(cap)s of '
'value "%(node_val)s" instead of "%(expected)s"',
{'node': _utils.log_node(node), 'cap': key,
{'node': _utils.log_res(node), 'cap': key,
'node_val': node_value, 'expected': value})
return False
@@ -197,14 +198,14 @@ class TraitsFilter(Filter):
traits = node.traits or []
LOG.debug('Traits for node %(node)s: %(traits)s',
{'node': _utils.log_node(node), 'traits': traits})
{'node': _utils.log_res(node), 'traits': traits})
for trait in traits:
self._counter[trait] += 1
missing = set(self._traits) - set(traits)
if missing:
LOG.debug('Node %(node)s does not have traits %(missing)s',
{'node': _utils.log_node(node), 'missing': missing})
{'node': _utils.log_res(node), 'missing': missing})
return False
return True
@@ -239,24 +240,28 @@ class CustomPredicateFilter(Filter):
class IronicReserver(Reserver):
def __init__(self, api):
self._api = api
def __init__(self, connection, instance_info=None):
self._connection = connection
self._failed_nodes = []
self._iinfo = instance_info or {}
def validate(self, node):
try:
self._api.validate_node(node)
except RuntimeError as exc:
self._connection.baremetal.validate_node(
node, required=('power', 'management'))
except sdk_exc.SDKException as exc:
message = ('Node %(node)s failed validation: %(err)s' %
{'node': _utils.log_node(node), 'err': exc})
{'node': _utils.log_res(node), 'err': exc})
LOG.warning(message)
raise exceptions.ValidationFailed(message)
def __call__(self, node):
try:
self.validate(node)
return self._api.reserve_node(node, instance_uuid=node.uuid)
except Exception:
iinfo = dict(node.instance_info or {}, **self._iinfo)
return self._connection.baremetal.update_node(
node, instance_id=node.id, instance_info=iinfo)
except sdk_exc.SDKException:
self._failed_nodes.append(node)
raise
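
The reserver now leans on the SDK for both steps: validation is scoped to the interfaces that matter at scheduling time, and reservation is a plain node update. A minimal sketch, assuming conn and a candidate node:

    from openstack import exceptions as sdk_exc

    try:
        # Deploy validation happens later, right before provisioning;
        # here only power and management have to pass.
        conn.baremetal.validate_node(node, required=('power', 'management'))
        node = conn.baremetal.update_node(node, instance_id=node.id)
    except sdk_exc.SDKException as exc:
        print('Node %s was not reserved: %s' % (node.id, exc))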

View File

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import re
import six
@@ -20,13 +21,6 @@ import six
from metalsmith import exceptions
def log_node(node):
if node.name:
return '%s (UUID %s)' % (node.name, node.uuid)
else:
return node.uuid
def log_res(res):
if getattr(res, 'name', None):
return '%s (UUID %s)' % (res.name, res.id)
@@ -56,12 +50,12 @@ def get_root_disk(root_size_gb, node):
except KeyError:
raise exceptions.UnknownRootDiskSize(
'No local_gb for node %s and no root partition size '
'specified' % log_node(node))
'specified' % log_res(node))
except (TypeError, ValueError, AssertionError):
raise exceptions.UnknownRootDiskSize(
'The local_gb for node %(node)s is invalid: '
'expected positive integer, got %(value)s' %
{'node': log_node(node),
{'node': log_res(node),
'value': node.properties['local_gb']})
# allow for partitioning and config drive
@@ -104,3 +98,65 @@ def parse_checksums(checksums):
result[fname.strip().lstrip('*')] = checksum.strip()
return result
class GetNodeMixin(object):
"""A helper mixin for getting nodes with hostnames."""
HOSTNAME_FIELD = 'metalsmith_hostname'
_node_list = None
def _available_nodes(self):
return self.connection.baremetal.nodes(details=True,
associated=False,
provision_state='available',
is_maintenance=False)
def _nodes_for_lookup(self):
return self.connection.baremetal.nodes(
fields=['uuid', 'name', 'instance_info'])
def _find_node_by_hostname(self, hostname):
"""A helper to find a node by metalsmith hostname."""
nodes = self._node_list or self._nodes_for_lookup()
existing = [n for n in nodes
if n.instance_info.get(self.HOSTNAME_FIELD) == hostname]
if len(existing) > 1:
raise RuntimeError("More than one node found with hostname "
"%(host)s: %(nodes)s" %
{'host': hostname,
'nodes': ', '.join(log_res(n)
for n in existing)})
elif not existing:
return None
else:
# Fetch the complete node information before returning
return self.connection.baremetal.get_node(existing[0].id)
def _get_node(self, node, refresh=False, accept_hostname=False):
"""A helper to find and return a node."""
if isinstance(node, six.string_types):
if accept_hostname and is_hostname_safe(node):
by_hostname = self._find_node_by_hostname(node)
if by_hostname is not None:
return by_hostname
return self.connection.baremetal.get_node(node)
elif hasattr(node, 'node'):
# Instance object
node = node.node
else:
node = node
if refresh:
return self.connection.baremetal.get_node(node)
else:
return node
@contextlib.contextmanager
def _cache_node_list_for_lookup(self):
if self._node_list is None:
self._node_list = list(self._nodes_for_lookup())
yield self._node_list
self._node_list = None
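
The mixin keeps hostname lookups cheap by asking Ironic for just the fields it needs. The same query on its own, assuming conn (the hostname is made up):

    # 'fields' trims the node records; instance_info carries the
    # metalsmith_hostname marker set at deploy time.
    nodes = conn.baremetal.nodes(fields=['uuid', 'name', 'instance_info'])
    matches = [n for n in nodes
               if n.instance_info.get('metalsmith_hostname') == 'node-1']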

View File

@@ -69,12 +69,12 @@ class GlanceImage(_Source):
LOG.debug('Image: %s', self._image_obj)
updates = {
'/instance_info/image_source': self._image_obj.id
'image_source': self._image_obj.id
}
for prop in ('kernel', 'ramdisk'):
value = getattr(self._image_obj, '%s_id' % prop, None)
if value:
updates['/instance_info/%s' % prop] = value
updates[prop] = value
return updates
@@ -144,8 +144,8 @@ class HttpWholeDiskImage(_Source):
LOG.debug('Image: %(image)s, checksum %(checksum)s',
{'image': self.url, 'checksum': self.checksum})
return {
'/instance_info/image_source': self.url,
'/instance_info/image_checksum': self.checksum,
'image_source': self.url,
'image_checksum': self.checksum,
}
@@ -172,8 +172,8 @@ class HttpPartitionImage(HttpWholeDiskImage):
def _node_updates(self, connection):
updates = super(HttpPartitionImage, self)._node_updates(connection)
updates['/instance_info/kernel'] = self.kernel_url
updates['/instance_info/ramdisk'] = self.ramdisk_url
updates['kernel'] = self.kernel_url
updates['ramdisk'] = self.ramdisk_url
return updates
@@ -203,8 +203,8 @@ class FileWholeDiskImage(_Source):
LOG.debug('Image: %(image)s, checksum %(checksum)s',
{'image': self.location, 'checksum': self.checksum})
return {
'/instance_info/image_source': self.location,
'/instance_info/image_checksum': self.checksum,
'image_source': self.location,
'image_checksum': self.checksum,
}
@@ -239,6 +239,6 @@ class FilePartitionImage(FileWholeDiskImage):
def _node_updates(self, connection):
updates = super(FilePartitionImage, self)._node_updates(connection)
updates['/instance_info/kernel'] = self.kernel_location
updates['/instance_info/ramdisk'] = self.ramdisk_location
updates['kernel'] = self.kernel_location
updates['ramdisk'] = self.ramdisk_location
return updates
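
Because instance_info is now written as one dict (see the provisioner change above), the image sources return bare instance_info keys instead of JSON-patch paths, and merging them is a plain dict update. An illustration, assuming conn, node and a source object image:

    instance_info = node.instance_info.copy()
    # e.g. {'image_source': ..., 'image_checksum': ...} for HTTP images
    instance_info.update(image._node_updates(conn))
    node = conn.baremetal.update_node(node, instance_info=instance_info)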

View File

@@ -75,7 +75,7 @@ class TestDeploy(testtools.TestCase):
instance = mock_pr.return_value.provision_node.return_value
instance.create_autospec(_instance.Instance)
instance.node.name = None
instance.node.uuid = '123'
instance.node.id = '123'
instance.state = 'active'
instance.is_deployed = True
instance.ip_addresses.return_value = {'private': ['1.2.3.4']}
@@ -127,7 +127,7 @@ class TestDeploy(testtools.TestCase):
instance.is_deployed = True
instance.ip_addresses.return_value = {}
instance.node.name = None
instance.node.uuid = '123'
instance.node.id = '123'
instance.state = 'active'
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
@@ -142,7 +142,7 @@ class TestDeploy(testtools.TestCase):
instance.create_autospec(_instance.Instance)
instance.is_deployed = False
instance.node.name = None
instance.node.uuid = '123'
instance.node.id = '123'
instance.state = 'deploying'
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
@@ -487,7 +487,7 @@ class TestUndeploy(testtools.TestCase):
def test_ok(self, mock_os_conf, mock_pr):
node = mock_pr.return_value.unprovision_node.return_value
node.uuid = '123'
node.id = '123'
node.name = None
node.provision_state = 'cleaning'
@@ -506,7 +506,7 @@ class TestUndeploy(testtools.TestCase):
def test_custom_wait(self, mock_os_conf, mock_pr):
node = mock_pr.return_value.unprovision_node.return_value
node.uuid = '123'
node.id = '123'
node.name = None
node.provision_state = 'available'
@@ -580,9 +580,9 @@ class TestShowWait(testtools.TestCase):
for hostname in ['hostname1', 'hostname2']
]
for inst in self.instances:
inst.node.uuid = inst.uuid
inst.node.id = inst.uuid
inst.node.name = 'name-%s' % inst.uuid
inst.to_dict.return_value = {inst.node.uuid: inst.node.name}
inst.to_dict.return_value = {inst.node.id: inst.node.name}
def test_show(self, mock_os_conf, mock_pr):
mock_pr.return_value.show_instances.return_value = self.instances

View File

@@ -14,9 +14,9 @@
# limitations under the License.
import json
import os
import mock
from openstack.baremetal import configdrive
import testtools
from metalsmith import _config
@@ -25,7 +25,7 @@ from metalsmith import _config
class TestInstanceConfig(testtools.TestCase):
def setUp(self):
super(TestInstanceConfig, self).setUp()
self.node = mock.Mock(uuid='1234')
self.node = mock.Mock(id='1234')
self.node.name = 'node name'
def _check(self, config, expected_metadata, expected_userdata=None):
@@ -39,24 +39,19 @@ class TestInstanceConfig(testtools.TestCase):
'meta': {}}
expected_m.update(expected_metadata)
with config.build_configdrive_directory(self.node, 'example.com') as d:
for version in ('2012-08-10', 'latest'):
with open(os.path.join(d, 'openstack', version,
'meta_data.json')) as fp:
metadata = json.load(fp)
with mock.patch.object(configdrive, 'build', autospec=True) as mb:
result = config.build_configdrive(self.node, "example.com")
mb.assert_called_once_with(expected_m, mock.ANY)
self.assertIs(result, mb.return_value)
user_data = mb.call_args[0][1]
self.assertEqual(expected_m, metadata)
user_data = os.path.join(d, 'openstack', version, 'user_data')
if expected_userdata is None:
self.assertFalse(os.path.exists(user_data))
else:
with open(user_data) as fp:
lines = list(fp)
self.assertEqual('#cloud-config\n', lines[0])
user_data = json.loads(''.join(lines[1:]))
self.assertEqual(expected_userdata, user_data)
self.assertFalse(os.path.exists(d))
if expected_userdata:
self.assertIsNotNone(user_data)
user_data = user_data.decode('utf-8')
header, user_data = user_data.split('\n', 1)
self.assertEqual('#cloud-config', header)
user_data = json.loads(user_data)
self.assertEqual(expected_userdata, user_data)
def test_default(self):
config = _config.InstanceConfig()

View File

@@ -23,21 +23,19 @@ class TestInstanceIPAddresses(test_provisioner.Base):
def setUp(self):
super(TestInstanceIPAddresses, self).setUp()
self.instance = _instance.Instance(self.api, self.node)
self.api.list_node_attached_ports.return_value = [
mock.Mock(spec=['id'], id=i) for i in ('111', '222')
]
self.api.baremetal.list_node_vifs.return_value = ['111', '222']
self.ports = [
mock.Mock(spec=['network_id', 'fixed_ips', 'network'],
network_id=n, fixed_ips=[{'ip_address': ip}])
for n, ip in [('0', '192.168.0.1'), ('1', '10.0.0.2')]
]
self.conn.network.get_port.side_effect = self.ports
self.api.network.get_port.side_effect = self.ports
self.nets = [
mock.Mock(spec=['id', 'name'], id=str(i)) for i in range(2)
]
for n in self.nets:
n.name = 'name-%s' % n.id
self.conn.network.get_network.side_effect = self.nets
self.api.network.get_network.side_effect = self.nets
def test_ip_addresses(self):
ips = self.instance.ip_addresses()
@@ -70,7 +68,7 @@ class TestInstanceStates(test_provisioner.Base):
self.assertTrue(self.instance.is_healthy)
def test_state_deploying_maintenance(self):
self.node.maintenance = True
self.node.is_maintenance = True
self.node.provision_state = 'wait call-back'
self.assertEqual('deploying', self.instance.state)
self.assertFalse(self.instance.is_deployed)
@@ -83,7 +81,7 @@ class TestInstanceStates(test_provisioner.Base):
self.assertTrue(self.instance.is_healthy)
def test_state_maintenance(self):
self.node.maintenance = True
self.node.is_maintenance = True
self.node.provision_state = 'active'
self.assertEqual('maintenance', self.instance.state)
self.assertTrue(self.instance.is_deployed)
@@ -112,5 +110,5 @@ class TestInstanceStates(test_provisioner.Base):
'ip_addresses': {'private': ['1.2.3.4']},
'node': {'node': 'dict'},
'state': 'deploying',
'uuid': self.node.uuid},
'uuid': self.node.id},
self.instance.to_dict())

View File

@@ -1,129 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
import mock
import testtools
from metalsmith import _instance
from metalsmith import _os_api
class TestNodes(testtools.TestCase):
def setUp(self):
super(TestNodes, self).setUp()
self.session = mock.Mock()
self.ironic_fixture = self.useFixture(
fixtures.MockPatchObject(_os_api.ir_client, 'get_client',
autospec=True))
self.cli = self.ironic_fixture.mock.return_value
self.api = _os_api.API(session=self.session, connection=mock.Mock())
def test_get_node_by_uuid(self):
res = self.api.get_node('uuid1')
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_hostname(self):
self.cli.node.list.return_value = [
mock.Mock(uuid='uuid0', instance_info={}),
mock.Mock(uuid='uuid1',
instance_info={'metalsmith_hostname': 'host1'}),
]
res = self.api.get_node('host1', accept_hostname=True)
# Loading details
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_hostname_not_found(self):
self.cli.node.list.return_value = [
mock.Mock(uuid='uuid0', instance_info={}),
mock.Mock(uuid='uuid1',
instance_info={'metalsmith_hostname': 'host0'}),
]
res = self.api.get_node('host1', accept_hostname=True)
# Loading details
self.cli.node.get.assert_called_once_with('host1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_node(self):
res = self.api.get_node(mock.sentinel.node)
self.assertIs(res, mock.sentinel.node)
self.assertFalse(self.cli.node.get.called)
def test_get_node_by_node_with_refresh(self):
res = self.api.get_node(mock.Mock(spec=['uuid'], uuid='uuid1'),
refresh=True)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_instance(self):
inst = _instance.Instance(mock.Mock(), mock.Mock())
res = self.api.get_node(inst)
self.assertIs(res, inst.node)
self.assertFalse(self.cli.node.get.called)
def test_get_node_by_instance_with_refresh(self):
inst = _instance.Instance(mock.Mock(),
mock.Mock(spec=['uuid'], uuid='uuid1'))
res = self.api.get_node(inst, refresh=True)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_find_node_by_hostname(self):
self.cli.node.list.return_value = [
mock.Mock(uuid='uuid0', instance_info={}),
mock.Mock(uuid='uuid1',
instance_info={'metalsmith_hostname': 'host1'}),
]
res = self.api.find_node_by_hostname('host1')
# Loading details
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_find_node_by_hostname_cached(self):
self.cli.node.list.return_value = [
mock.Mock(uuid='uuid0', instance_info={}),
mock.Mock(uuid='uuid1',
instance_info={'metalsmith_hostname': 'host1'}),
]
with self.api.cache_node_list_for_lookup():
res = self.api.find_node_by_hostname('host1')
self.assertIs(res, self.cli.node.get.return_value)
self.assertIsNone(self.api.find_node_by_hostname('host2'))
self.assertEqual(1, self.cli.node.list.call_count)
# This call is no longer cached
self.assertIsNone(self.api.find_node_by_hostname('host2'))
self.assertEqual(2, self.cli.node.list.call_count)
def test_find_node_by_hostname_not_found(self):
self.cli.node.list.return_value = [
mock.Mock(uuid='uuid0', instance_info={}),
mock.Mock(uuid='uuid1',
instance_info={'metalsmith_hostname': 'host1'}),
]
self.assertIsNone(self.api.find_node_by_hostname('host0'))
self.assertFalse(self.cli.node.get.called)
def test_find_node_by_hostname_duplicate(self):
self.cli.node.list.return_value = [
mock.Mock(uuid='uuid0',
instance_info={'metalsmith_hostname': 'host1'}),
mock.Mock(uuid='uuid1',
instance_info={'metalsmith_hostname': 'host1'}),
]
self.assertRaisesRegex(RuntimeError, 'More than one node',
self.api.find_node_by_hostname, 'host1')
self.assertFalse(self.cli.node.get.called)

File diff suppressed because it is too large

View File

@@ -14,6 +14,7 @@
# limitations under the License.
import mock
from openstack import exceptions as sdk_exc
import testtools
from metalsmith import _scheduler
@@ -24,14 +25,14 @@ class TestScheduleNode(testtools.TestCase):
def setUp(self):
super(TestScheduleNode, self).setUp()
self.nodes = [mock.Mock(spec=['uuid', 'name']) for _ in range(2)]
self.nodes = [mock.Mock(spec=['id', 'name']) for _ in range(2)]
self.reserver = self._reserver(lambda x: x)
def _reserver(self, side_effect):
reserver = mock.Mock(spec=_scheduler.Reserver)
reserver.side_effect = side_effect
if isinstance(side_effect, Exception):
reserver.fail.side_effect = RuntimeError('failed')
reserver.fail.side_effect = exceptions.ReservationFailed('fail')
else:
reserver.fail.side_effect = AssertionError('called fail')
return reserver
@@ -56,15 +57,16 @@ class TestScheduleNode(testtools.TestCase):
self.assertFalse(self.reserver.fail.called)
def test_reservation_one_failed(self):
reserver = self._reserver([Exception("boom"), self.nodes[1]])
reserver = self._reserver([sdk_exc.SDKException("boom"),
self.nodes[1]])
result = _scheduler.schedule_node(self.nodes, [], reserver)
self.assertIs(result, self.nodes[1])
self.assertEqual([mock.call(n) for n in self.nodes],
reserver.call_args_list)
def test_reservation_all_failed(self):
reserver = self._reserver(Exception("boom"))
self.assertRaisesRegex(RuntimeError, 'failed',
reserver = self._reserver(sdk_exc.SDKException("boom"))
self.assertRaisesRegex(exceptions.ReservationFailed, 'fail',
_scheduler.schedule_node,
self.nodes, [], reserver)
self.assertEqual([mock.call(n) for n in self.nodes],
@@ -121,7 +123,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
def test_nothing_requested_nothing_found(self):
fltr = _scheduler.CapabilitiesFilter({})
node = mock.Mock(properties={}, spec=['properties', 'name', 'uuid'])
node = mock.Mock(properties={}, spec=['properties', 'name', 'id'])
self.assertTrue(fltr(node))
def test_matching_node(self):
@@ -129,7 +131,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
'foo': 'bar'})
node = mock.Mock(
properties={'capabilities': 'foo:bar,profile:compute,answer:42'},
spec=['properties', 'name', 'uuid'])
spec=['properties', 'name', 'id'])
self.assertTrue(fltr(node))
def test_not_matching_node(self):
@@ -137,14 +139,14 @@ class TestCapabilitiesFilter(testtools.TestCase):
'foo': 'bar'})
node = mock.Mock(
properties={'capabilities': 'foo:bar,answer:42'},
spec=['properties', 'name', 'uuid'])
spec=['properties', 'name', 'id'])
self.assertFalse(fltr(node))
def test_fail_message(self):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
node = mock.Mock(
properties={'capabilities': 'profile:control'},
spec=['properties', 'name', 'uuid'])
spec=['properties', 'name', 'id'])
self.assertFalse(fltr(node))
self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
'No available nodes found with capabilities '
@@ -156,7 +158,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
for cap in ['foo,profile:control', 42, 'a:b:c']:
node = mock.Mock(properties={'capabilities': cap},
spec=['properties', 'name', 'uuid'])
spec=['properties', 'name', 'id'])
self.assertFalse(fltr(node))
self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
'No available nodes found with capabilities '
@@ -175,24 +177,24 @@ class TestTraitsFilter(testtools.TestCase):
def test_no_traits(self):
fltr = _scheduler.TraitsFilter([])
node = mock.Mock(spec=['name', 'uuid'])
node = mock.Mock(spec=['name', 'id'])
self.assertTrue(fltr(node))
def test_ok(self):
fltr = _scheduler.TraitsFilter(['tr1', 'tr2'])
node = mock.Mock(spec=['name', 'uuid', 'traits'],
node = mock.Mock(spec=['name', 'id', 'traits'],
traits=['tr3', 'tr2', 'tr1'])
self.assertTrue(fltr(node))
def test_missing_one(self):
fltr = _scheduler.TraitsFilter(['tr1', 'tr2'])
node = mock.Mock(spec=['name', 'uuid', 'traits'],
node = mock.Mock(spec=['name', 'id', 'traits'],
traits=['tr3', 'tr1'])
self.assertFalse(fltr(node))
def test_missing_all(self):
fltr = _scheduler.TraitsFilter(['tr1', 'tr2'])
node = mock.Mock(spec=['name', 'uuid', 'traits'], traits=None)
node = mock.Mock(spec=['name', 'id', 'traits'], traits=None)
self.assertFalse(fltr(node))
@@ -200,10 +202,12 @@ class TestIronicReserver(testtools.TestCase):
def setUp(self):
super(TestIronicReserver, self).setUp()
self.node = mock.Mock(spec=['uuid', 'name'])
self.api = mock.Mock(spec=['reserve_node', 'release_node',
'validate_node'])
self.api.reserve_node.side_effect = lambda node, instance_uuid: node
self.node = mock.Mock(spec=['id', 'name', 'instance_info'],
instance_info={})
self.api = mock.Mock(spec=['baremetal'])
self.api.baremetal = mock.Mock(spec=['update_node', 'validate_node'])
self.api.baremetal.update_node.side_effect = (
lambda node, **kw: node)
self.reserver = _scheduler.IronicReserver(self.api)
def test_fail(self):
@@ -213,22 +217,36 @@
def test_ok(self):
self.assertEqual(self.node, self.reserver(self.node))
self.api.validate_node.assert_called_with(self.node)
self.api.reserve_node.assert_called_once_with(
self.node, instance_uuid=self.node.uuid)
self.api.baremetal.validate_node.assert_called_with(
self.node, required=('power', 'management'))
self.api.baremetal.update_node.assert_called_once_with(
self.node, instance_id=self.node.id, instance_info={})
def test_with_instance_info(self):
self.reserver = _scheduler.IronicReserver(self.api,
{'cat': 'meow'})
self.assertEqual(self.node, self.reserver(self.node))
self.api.baremetal.validate_node.assert_called_with(
self.node, required=('power', 'management'))
self.api.baremetal.update_node.assert_called_once_with(
self.node, instance_id=self.node.id,
instance_info={'cat': 'meow'})
def test_reservation_failed(self):
self.api.reserve_node.side_effect = RuntimeError('conflict')
self.assertRaisesRegex(RuntimeError, 'conflict',
self.api.baremetal.update_node.side_effect = (
sdk_exc.SDKException('conflict'))
self.assertRaisesRegex(sdk_exc.SDKException, 'conflict',
self.reserver, self.node)
self.api.validate_node.assert_called_with(self.node)
self.api.reserve_node.assert_called_once_with(
self.node, instance_uuid=self.node.uuid)
self.api.baremetal.validate_node.assert_called_with(
self.node, required=('power', 'management'))
self.api.baremetal.update_node.assert_called_once_with(
self.node, instance_id=self.node.id, instance_info={})
def test_validation_failed(self):
self.api.validate_node.side_effect = RuntimeError('fail')
self.api.baremetal.validate_node.side_effect = (
sdk_exc.SDKException('fail'))
self.assertRaisesRegex(exceptions.ValidationFailed, 'fail',
self.reserver, self.node)
self.api.validate_node.assert_called_once_with(self.node)
self.assertFalse(self.api.reserve_node.called)
self.assertFalse(self.api.release_node.called)
self.api.baremetal.validate_node.assert_called_with(
self.node, required=('power', 'management'))
self.assertFalse(self.api.baremetal.update_node.called)

View File

@@ -19,7 +19,7 @@
include_role:
name: metalsmith_deployment
vars:
metalsmith_extra_args: -vv
metalsmith_extra_args: --debug
metalsmith_resource_class: baremetal
metalsmith_instances:
- hostname: test
@@ -48,7 +48,7 @@
failed_when: instance_via_list.state != 'active' or instance_via_list.node.provision_state != 'active'
- name: Show active node information
command: openstack baremetal node show {{ instance.node.uuid }}
command: openstack baremetal node show {{ instance.node.id }}
- name: Get IP address
set_fact:
@@ -69,7 +69,7 @@
command: metalsmith --debug undeploy --wait 900 test
- name: Get the current status of the deployed node
command: openstack baremetal node show {{ instance.node.uuid }} -f json
command: openstack baremetal node show {{ instance.node.id }} -f json
register: undeployed_node_result
- name: Parse node state
@@ -87,7 +87,7 @@
when: undeployed_node.extra != {}
- name: Get attached VIFs for the node
command: openstack baremetal node vif list {{ instance.node.uuid }} -f value -c ID
command: openstack baremetal node vif list {{ instance.node.id }} -f value -c ID
register: vif_list_output
- name: Check that no VIFs are still attached

View File

@@ -2,7 +2,6 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
openstacksdk>=0.17.0 # Apache-2.0
python-ironicclient>=1.14.0 # Apache-2.0
openstacksdk>=0.22.0 # Apache-2.0
requests>=2.18.4 # Apache-2.0
six>=1.10.0 # MIT