Grand refactor to split out reusable bits

This commit is contained in:
Dmitry Tantsur 2018-05-08 12:12:10 +02:00
parent fc37a7aaa3
commit 9f1b7755fc
18 changed files with 1207 additions and 504 deletions

View File

@ -2,7 +2,7 @@ language: python
python: 2.7
env:
- TOX_ENV=py27
- TOX_ENV=py34
- TOX_ENV=py3
- TOX_ENV=pep8
install:
- pip install tox

View File

@ -0,0 +1,17 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._exceptions import * # noqa
from ._provisioner import Provisioner # noqa

View File

@ -19,9 +19,9 @@ import os
import sys
from keystoneauth1.identity import generic
from keystoneauth1 import session
from metalsmith import deploy
from metalsmith import os_api
from metalsmith import _provisioner
LOG = logging.getLogger(__name__)
@ -35,19 +35,18 @@ def _do_deploy(api, args, wait=None):
else:
ssh_keys = []
deploy.deploy(api, args.resource_class,
image_id=args.image,
network_id=args.network,
root_disk_size=args.root_disk_size,
ssh_keys=ssh_keys,
capabilities=capabilities,
netboot=args.netboot,
wait=wait,
dry_run=args.dry_run)
node = api.reserve_node(args.resource_class, capabilities=capabilities)
api.provision_node(node,
image_ref=args.image,
network_refs=[args.network],
root_disk_size=args.root_disk_size,
ssh_keys=ssh_keys,
netboot=args.netboot,
wait=wait)
def _do_undeploy(api, args, wait=None):
deploy.undeploy(api, args.node, wait=wait)
api.unprovision_node(args.node, wait=wait)
def _parse_args(args):
@ -130,7 +129,8 @@ def main(args=sys.argv[1:]):
password=args.os_password,
user_domain_name=args.os_user_domain_name,
project_domain_name=args.os_project_domain_name)
api = os_api.API(auth)
sess = session.Session(auth=auth)
api = _provisioner.Provisioner(sess, dry_run=args.dry_run)
try:
args.func(api, args, wait=wait)

79
metalsmith/_exceptions.py Normal file
View File

@ -0,0 +1,79 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from metalsmith import _utils
class Error(Exception):
    """Root of the metalsmith exception hierarchy.

    Catching this type catches every error raised by this library.
    """
class ReservationFailed(Error):
    """Failed to reserve a suitable node.

    Base class for scheduling failures; carries the original request so
    callers can report exactly what could not be satisfied.
    """

    def __init__(self, message, requested_resource_class,
                 requested_capabilities):
        super(ReservationFailed, self).__init__(message)
        # The resource class that was requested but not satisfied.
        self.requested_resource_class = requested_resource_class
        # The capabilities dict that was requested but not satisfied.
        self.requested_capabilities = requested_capabilities
class ResourceClassNotFound(ReservationFailed):
    """No nodes match the given resource class."""

    def __init__(self, requested_resource_class, requested_capabilities):
        # Build the message inline; the base class stores the request.
        super(ResourceClassNotFound, self).__init__(
            "No available nodes found with resource class %s"
            % requested_resource_class,
            requested_resource_class,
            requested_capabilities)
class CapabilitiesNotFound(ReservationFailed):
    """Requested capabilities do not match any nodes.

    The message is supplied by the scheduler and lists both the requested
    and the existing capabilities.
    """
class ValidationFailed(ReservationFailed):
    """Validation failed for all requested nodes.

    The message aggregates the individual per-node validation errors.
    """
class AllNodesReserved(ReservationFailed):
    """All nodes are already reserved."""

    def __init__(self, requested_resource_class, requested_capabilities):
        super(AllNodesReserved, self).__init__(
            'All the candidate nodes are already reserved',
            requested_resource_class,
            requested_capabilities)
class ReleaseFailed(Error):
    """Failed to release a lock on the node."""

    def __init__(self, node, error):
        # Embed a human-readable node identifier in the message.
        super(ReleaseFailed, self).__init__(
            'Failed to release node %(node)s: %(error)s'
            % {'node': _utils.log_node(node), 'error': error})
class InvalidImage(Error):
    """Requested image is invalid and cannot be used.

    Raised when the image cannot be found or lacks a required property
    (kernel_id or ramdisk_id).
    """
class InvalidNetwork(Error):
    """Requested network is invalid and cannot be used.

    Raised when a requested network cannot be found by name or UUID.
    """

View File

@ -17,8 +17,8 @@ import logging
import glanceclient
from ironicclient import client as ir_client
from keystoneauth1 import session
from neutronclient.v2_0 import client as neu_client
import six
LOG = logging.getLogger(__name__)
@ -42,10 +42,8 @@ class API(object):
IRONIC_VERSION = '1'
IRONIC_MICRO_VERSION = '1.28'
def __init__(self, auth):
LOG.debug('Creating a session')
self._auth = auth
self.session = session.Session(auth=auth)
def __init__(self, session):
self.session = session
LOG.debug('Creating service clients')
self.glance = glanceclient.Client(self.GLANCE_VERSION,
@ -55,6 +53,22 @@ class API(object):
self.IRONIC_VERSION, session=self.session,
os_ironic_api_version=self.IRONIC_MICRO_VERSION)
def attach_port_to_node(self, node, port_id):
self.ironic.node.vif_attach(_node_id(node), port_id)
def create_port(self, network_id, **kwargs):
port_body = dict(network_id=network_id,
admin_state_up=True,
**kwargs)
port = self.neutron.create_port({'port': port_body})
return DictWithAttrs(port['port'])
def delete_port(self, port_id):
self.neutron.delete_port(port_id)
def detach_port_from_node(self, node, port_id):
self.ironic.node.vif_detach(_node_id(node), port_id)
def get_image_info(self, image_id):
for img in self.glance.images.list():
if img.name == image_id or img.id == image_id:
@ -65,6 +79,18 @@ class API(object):
if net['name'] == network_id or net['id'] == network_id:
return DictWithAttrs(net)
def get_node(self, node):
if isinstance(node, six.string_types):
return self.ironic.node.get(node)
else:
return node
def list_node_attached_ports(self, node):
return self.ironic.node.vif_list(_node_id(node))
def list_node_ports(self, node):
return self.ironic.node.list_ports(_node_id(node), limit=0)
def list_nodes(self, resource_class=None, maintenance=False,
associated=False, provision_state='available', detail=True):
return self.ironic.node.list(limit=0, resource_class=resource_class,
@ -72,57 +98,53 @@ class API(object):
associated=associated, detail=detail,
provision_state=provision_state)
def list_node_ports(self, node_id):
return self.ironic.node.list_ports(node_id, limit=0)
def node_action(self, node, action, **kwargs):
self.ironic.node.set_provision_state(_node_id(node), action, **kwargs)
def _convert_patches(self, attrs):
patches = []
for key, value in attrs.items():
if not key.startswith('/'):
key = '/' + key
def release_node(self, node):
return self.update_node(_node_id(node), instance_uuid=REMOVE)
if value is REMOVE:
patches.append({'op': 'remove', 'path': key})
else:
patches.append({'op': 'add', 'path': key, 'value': value})
def reserve_node(self, node, instance_uuid):
return self.update_node(_node_id(node), instance_uuid=instance_uuid)
return patches
def update_node(self, node_id, *args, **attrs):
def update_node(self, node, *args, **attrs):
if args:
attrs.update(args[0])
patches = self._convert_patches(attrs)
return self.ironic.node.update(node_id, patches)
patches = _convert_patches(attrs)
return self.ironic.node.update(_node_id(node), patches)
def attach_port_to_node(self, node_id, port_id):
self.ironic.node.vif_attach(node_id, port_id)
def detach_port_from_node(self, node_id, port_id):
self.ironic.node.vif_detach(node_id, port_id)
def list_node_attached_ports(self, node_id):
return self.ironic.node.vif_list(node_id)
def validate_node(self, node_id, validate_deploy=False):
def validate_node(self, node, validate_deploy=False):
ifaces = ['power', 'management']
if validate_deploy:
ifaces += ['deploy']
validation = self.ironic.node.validate(node_id)
validation = self.ironic.node.validate(_node_id(node))
for iface in ifaces:
result = getattr(validation, iface)
if not result['result']:
raise RuntimeError('%s: %s' % (iface, result['reason']))
def create_port(self, network_id, mac_address):
port_body = {'mac_address': mac_address,
'network_id': network_id,
'admin_state_up': True}
port = self.neutron.create_port({'port': port_body})
return DictWithAttrs(port['port'])
def wait_for_active(self, node, timeout):
self.ironic.node.wait_for_provision_state(_node_id(node), 'active',
timeout=timeout)
def delete_port(self, port_id):
self.neutron.delete_port(port_id)
def node_action(self, node_id, action, **kwargs):
self.ironic.node.set_provision_state(node_id, action, **kwargs)
def _node_id(node):
    """Return an ironic node identifier.

    Accepts either a node object (its ``uuid`` is used) or a plain
    string ID/name, which is passed through unchanged.
    """
    return node if isinstance(node, six.string_types) else node.uuid
def _convert_patches(attrs):
    """Convert a flat attribute mapping into a JSON-patch list.

    Keys are normalized to start with ``/``; a value of ``REMOVE``
    produces a ``remove`` operation, anything else an ``add``.
    """
    patches = []
    for key, value in attrs.items():
        path = key if key.startswith('/') else '/' + key
        if value is REMOVE:
            patch = {'op': 'remove', 'path': path}
        else:
            patch = {'op': 'add', 'path': path, 'value': value}
        patches.append(patch)
    return patches

230
metalsmith/_provisioner.py Normal file
View File

@ -0,0 +1,230 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from oslo_utils import excutils
from metalsmith import _exceptions
from metalsmith import _os_api
from metalsmith import _scheduler
from metalsmith import _utils
LOG = logging.getLogger(__name__)
_CREATED_PORTS = 'metalsmith_created_ports'
class Provisioner(object):
    """API to deploy/undeploy nodes with OpenStack.

    Wraps the service clients (via _os_api.API) and orchestrates
    scheduling, port creation, deployment and clean up.
    """

    def __init__(self, session, dry_run=False):
        """Create a provisioner.

        :param session: keystoneauth session used to build service clients.
        :param dry_run: if True, no destructive action is actually taken.
        """
        self._api = _os_api.API(session)
        self._dry_run = dry_run

    def reserve_node(self, resource_class, capabilities=None):
        """Find and reserve a suitable node.

        :param resource_class: Requested resource class.
        :param capabilities: Requested capabilities as a dict.
        :return: reserved Node object
        :raises: ReservationFailed
        """
        capabilities = capabilities or {}
        nodes = self._api.list_nodes(resource_class=resource_class)
        if not nodes:
            raise _exceptions.ResourceClassNotFound(resource_class,
                                                    capabilities)
        # Make sure parallel executions don't try nodes in the same sequence
        random.shuffle(nodes)
        LOG.debug('Ironic nodes: %s', nodes)

        # Filters narrow the candidates; the reserver takes the final lock.
        filters = [_scheduler.CapabilitiesFilter(resource_class, capabilities),
                   _scheduler.ValidationFilter(self._api,
                                               resource_class, capabilities)]
        reserver = _scheduler.IronicReserver(self._api, resource_class,
                                             capabilities)
        return _scheduler.schedule_node(nodes, filters, reserver,
                                        dry_run=self._dry_run)

    def provision_node(self, node, image_ref, network_refs,
                       root_disk_size=None, ssh_keys=None, netboot=False,
                       wait=None):
        """Provision the node with the given image.

        :param node: Node object, UUID or name.
        :param image_ref: Image name or UUID to provision.
        :param network_refs: List of network names or UUIDs to use.
        :param root_disk_size: The size of the root partition. By default
            the value of the local_gb property is used.
        :param ssh_keys: list of public parts of the SSH keys to upload
            to the nodes.
        :param netboot: Whether to use networking boot for final instances.
        :param wait: How many seconds to wait for the deployment to finish,
            None to return immediately.
        :return: provisioned Node object
        :raises: InvalidImage, InvalidNetwork
        """
        node = self._api.get_node(node)
        root_disk_size = _utils.get_root_disk(root_disk_size, node)

        image = self._api.get_image_info(image_ref)
        if image is None:
            raise _exceptions.InvalidImage('Image %s does not exist' %
                                           image_ref)
        # TODO(dtantsur): support whole-disk images
        for im_prop in ('kernel_id', 'ramdisk_id'):
            if not getattr(image, im_prop, None):
                raise _exceptions.InvalidImage('%s is required on image' %
                                               im_prop)
        LOG.debug('Image: %s', image)

        networks = self._get_networks(network_refs)

        if self._dry_run:
            LOG.warning('Dry run, not provisioning node %s',
                        _utils.log_node(node))
            return node

        created_ports = self._create_ports(node, networks)

        target_caps = {'boot_option': 'netboot' if netboot else 'local'}

        # TODO(dtantsur): support whole-disk images
        # Record the created ports on the node itself so that a later
        # unprovision_node call can find and delete them.
        updates = {'/instance_info/ramdisk': image.ramdisk_id,
                   '/instance_info/kernel': image.kernel_id,
                   '/instance_info/image_source': image.id,
                   '/instance_info/root_gb': root_disk_size,
                   '/instance_info/capabilities': target_caps,
                   '/extra/%s' % _CREATED_PORTS: created_ports}
        try:
            node = self._api.update_node(node, updates)
            self._api.validate_node(node, validate_deploy=True)

            with _utils.config_drive_dir(node, ssh_keys) as cd:
                self._api.node_action(node, 'active',
                                      configdrive=cd)
            LOG.info('Provisioning started on node %s', _utils.log_node(node))

            if wait is not None:
                self._api.wait_for_active(node, timeout=wait)
                # Update the node to return its latest state
                node = self._api.get_node(node)
        except Exception:
            # Any failure triggers best-effort clean up before re-raising.
            with excutils.save_and_reraise_exception():
                LOG.error('Deploy attempt failed on node %s, cleaning up',
                          _utils.log_node(node))
                try:
                    self._clean_up(node, created_ports)
                except Exception:
                    LOG.exception('Clean up failed')

        if wait is not None:
            LOG.info('Deploy succeeded on node %s', _utils.log_node(node))
        return node

    def _get_networks(self, network_refs):
        """Validate and get the networks.

        :raises: InvalidNetwork if any reference does not resolve.
        """
        networks = []
        for network_ref in network_refs:
            network = self._api.get_network(network_ref)
            if network is None:
                raise _exceptions.InvalidNetwork('Network %s does not exist' %
                                                 network_ref)
            LOG.debug('Network: %s', network)
            networks.append(network)
        return networks

    def _clean_up(self, node, created_ports=None):
        """Clean up a failed deployment.

        Detaches and deletes the neutron ports this tool created, then
        releases the node reservation. Every step is best-effort: failures
        are logged and swallowed so the remaining steps still run.

        :param created_ports: port IDs to remove; when None they are read
            back from the node's extra field.
        """
        if self._dry_run:
            LOG.debug("Dry run, not cleaning up")
            return

        if created_ports is None:
            created_ports = node.extra.get(_CREATED_PORTS, [])

        for port_id in created_ports:
            LOG.debug('Detaching port %(port)s from node %(node)s',
                      {'port': port_id, 'node': node.uuid})
            try:
                self._api.detach_port_from_node(node.uuid, port_id)
            except Exception as exc:
                LOG.debug('Failed to remove VIF %(vif)s from node %(node)s, '
                          'assuming already removed: %(exc)s',
                          {'vif': port_id, 'node': _utils.log_node(node),
                           'exc': exc})

            LOG.debug('Deleting port %s', port_id)
            try:
                self._api.delete_port(port_id)
            except Exception:
                LOG.warning('Failed to delete neutron port %s', port_id)

        try:
            self._api.release_node(node)
        except Exception as exc:
            LOG.warning('Failed to remove instance_uuid from node %(node)s, '
                        'assuming already removed: %(exc)s',
                        {'node': _utils.log_node(node), 'exc': exc})

    def _create_ports(self, node, networks):
        """Create and attach ports on given networks.

        On any failure the already-created ports are cleaned up before the
        exception is re-raised.

        :return: list of created port IDs.
        """
        created_ports = []
        try:
            for network in networks:
                port = self._api.create_port(network_id=network.id)
                # Record the ID before attaching so a failed attach still
                # gets the port cleaned up.
                created_ports.append(port.id)
                LOG.debug('Created Neutron port %s', port)

                self._api.attach_port_to_node(node.uuid, port.id)
                LOG.info('Attached port %(port)s to node %(node)s',
                         {'port': port.id,
                          'node': _utils.log_node(node)})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error('Creating and binding ports failed, cleaning up')
                try:
                    self._clean_up(node, created_ports)
                except Exception:
                    LOG.exception('Clean up failed, delete and detach ports '
                                  '%s manually', created_ports)
        return created_ports

    def unprovision_node(self, node, wait=None):
        """Unprovision a previously provisioned node.

        :param node: node object, UUID or name.
        :param wait: How many seconds to wait for the process to finish,
            None to return immediately.
        """
        node = self._api.get_node(node)
        self._api.node_action(node.uuid, 'deleted')
        LOG.info('Deleting started for node %s', _utils.log_node(node))

        if wait is not None:
            # NOTE(review): reaches into the wrapped ironic client directly,
            # unlike the wait_for_active wrapper used for deploy; max(0, wait)
            # guards against a negative timeout.
            self._api.ironic.node.wait_for_provision_state(
                node.uuid, 'available', timeout=max(0, wait))

        self._clean_up(node)
        LOG.info('Node %s undeployed successfully', _utils.log_node(node))

237
metalsmith/_scheduler.py Normal file
View File

@ -0,0 +1,237 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
import logging
import six
from metalsmith import _exceptions
from metalsmith import _utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Filter(object):
    """Base class for filters.

    A filter is a callable applied to every candidate node during a
    scheduling pass; it must also be able to raise a meaningful error
    when it has rejected all nodes.
    """

    @abc.abstractmethod
    def __call__(self, node):
        """Validate this node.

        :param node: Node object.
        :return: True/False
        """

    @abc.abstractmethod
    def fail(self):
        """Fail scheduling because no nodes are left.

        Must raise an exception.
        """
@six.add_metaclass(abc.ABCMeta)
class Reserver(object):
    """Base class for reservers.

    A reserver is a callable that takes the final lock on a node chosen
    by the scheduler.
    """

    @abc.abstractmethod
    def __call__(self, node):
        """Reserve this node.

        :param node: Node object.
        :return: updated Node object if it was reserved
        :raises: any Exception to indicate that the next node should be tried
        """

    @abc.abstractmethod
    def fail(self):
        """Fail reservation because no nodes are left.

        Must raise an exception.
        """
def schedule_node(nodes, filters, reserver, dry_run=False):
    """Schedule one node.

    :param nodes: List of input nodes.
    :param filters: List of callable Filter objects to filter/validate nodes.
        They are called in passes. If a pass yields no nodes, an error is
        raised.
    :param reserver: A callable Reserver object. Must return the updated node
        or raise an exception.
    :param dry_run: If True, reserver is not actually called.
    :return: The resulting node
    """
    for f in filters:
        f_name = f.__class__.__name__
        LOG.debug('Running filter %(filter)s on %(count)d node(s)',
                  {'filter': f_name, 'count': len(nodes)})
        # Built-in filter(): keep only the nodes this pass accepts.
        nodes = list(filter(f, nodes))
        if not nodes:
            LOG.debug('Filter %s yielded no nodes', f_name)
            # fail() is required to raise; the assert guards against a
            # filter that violates that contract.
            f.fail()
            assert False, "BUG: %s.fail did not raise" % f_name
        LOG.debug('Filter %(filter)s yielded %(count)d node(s)',
                  {'filter': f_name, 'count': len(nodes)})

    if dry_run:
        LOG.debug('Dry run, not reserving any nodes')
        return nodes[0]

    # Try candidates in order until one reservation succeeds.
    for node in nodes:
        try:
            return reserver(node)
        except Exception as exc:
            LOG.debug('Node %(node)s was not reserved (%(exc)s), moving on '
                      'to the next one',
                      {'node': _utils.log_node(node), 'exc': exc})
    LOG.debug('No nodes could be reserved')
    reserver.fail()
    assert False, "BUG: %s.fail did not raise" % reserver.__class__.__name__
class CapabilitiesFilter(Filter):
    """Filter that checks capabilities.

    Accepts only nodes whose capabilities contain every requested
    key/value pair.
    """

    def __init__(self, resource_class, capabilities):
        self._resource_class = resource_class
        self._capabilities = capabilities
        # Counts how many nodes expose each key=value capability; used to
        # build a helpful error message in fail().
        self._counter = collections.Counter()

    def __call__(self, node):
        """Return True if the node has all requested capabilities."""
        try:
            caps = _utils.get_capabilities(node)
        except Exception:
            LOG.exception('Malformed capabilities on node %(node)s: %(caps)s',
                          {'node': _utils.log_node(node),
                           'caps': node.properties.get('capabilities')})
            return False

        LOG.debug('Capabilities for node %(node)s: %(caps)s',
                  {'node': _utils.log_node(node), 'caps': caps})
        for key, value in self._capabilities.items():
            try:
                node_value = caps[key]
            except KeyError:
                LOG.debug('Node %(node)s does not have capability %(cap)s',
                          {'node': _utils.log_node(node), 'cap': key})
                return False
            else:
                # Counted even on mismatch so fail() can report what the
                # existing nodes actually offer.
                self._counter["%s=%s" % (key, node_value)] += 1
                if value != node_value:
                    LOG.debug('Node %(node)s has capability %(cap)s of '
                              'value "%(node_val)s" instead of "%(expected)s"',
                              {'node': _utils.log_node(node), 'cap': key,
                               'node_val': node_value, 'expected': value})
                    return False

        return True

    def fail(self):
        """Raise CapabilitiesNotFound listing requested vs existing caps."""
        existing = ", ".join("%s (%d node(s))" % item
                             for item in self._counter.items())
        requested = ', '.join("%s=%s" % item
                              for item in self._capabilities.items())
        message = ("No available nodes found with capabilities %(req)s, "
                   "existing capabilities: %(exist)s" %
                   {'req': requested, 'exist': existing or 'none'})
        raise _exceptions.CapabilitiesNotFound(message,
                                               self._resource_class,
                                               self._capabilities)
class ValidationFilter(Filter):
    """Filter that runs validation on nodes.

    A node passes when ironic driver validation succeeds and its
    ``local_gb`` property is a positive integer. Each rejection reason is
    recorded so fail() can report all of them.
    """

    def __init__(self, api, resource_class, capabilities):
        self._api = api
        # These are only used for better exceptions
        self._resource_class = resource_class
        self._capabilities = capabilities
        self._failed_validation = []

    def __call__(self, node):
        """Return True if the node passes validation, False otherwise."""
        try:
            self._api.validate_node(node.uuid)
        except RuntimeError as exc:
            message = ('Node %(node)s failed validation: %(err)s' %
                       {'node': _utils.log_node(node), 'err': exc})
            LOG.warning(message)
            self._failed_validation.append(message)
            return False

        try:
            local_gb = node.properties['local_gb']
        except KeyError:
            message = 'No local_gb for node %s' % _utils.log_node(node)
            LOG.warning(message)
            self._failed_validation.append(message)
            return False

        # NOTE: the previous implementation used `assert int(...) > 0`,
        # which is stripped under `python -O`, and did not catch
        # ValueError, so a malformed string like "unknown" crashed the
        # whole scheduling pass instead of rejecting the node.
        try:
            valid = int(local_gb) > 0
        except (TypeError, ValueError):
            valid = False
        if not valid:
            message = ('The local_gb for node %(node)s is invalid: '
                       'expected positive integer, got %(value)s' %
                       {'node': _utils.log_node(node),
                        'value': node.properties['local_gb']})
            LOG.warning(message)
            self._failed_validation.append(message)
            return False

        return True

    def fail(self):
        """Raise ValidationFailed aggregating all recorded errors."""
        errors = ", ".join(self._failed_validation)
        message = "All available nodes have failed validation: %s" % errors
        raise _exceptions.ValidationFailed(message,
                                           self._resource_class,
                                           self._capabilities)
class IronicReserver(Reserver):
    """Reserver that locks a node by setting its instance_uuid in ironic."""

    def __init__(self, api, resource_class, capabilities):
        self._api = api
        # These are only used for better exceptions
        self._resource_class = resource_class
        self._capabilities = capabilities

    def __call__(self, node):
        """Reserve the node and re-validate it.

        :return: the updated Node object
        :raises: on reservation conflict or post-reservation validation
            failure (presumably a conflicting update from a parallel
            process — TODO confirm ironic's exact conflict exception).
        """
        result = self._api.reserve_node(node, instance_uuid=node.uuid)

        # Try validation again to be sure nothing has changed
        validator = ValidationFilter(self._api, self._resource_class,
                                     self._capabilities)
        if not validator(result):
            LOG.warning('Validation of node %s failed after reservation',
                        _utils.log_node(node))
            # Best effort: give the node back before raising.
            try:
                self._api.release_node(node)
            except Exception:
                LOG.exception('Failed to release the reserved node %s',
                              _utils.log_node(node))
            validator.fail()

        return result

    def fail(self):
        """Raise AllNodesReserved — every candidate was taken."""
        raise _exceptions.AllNodesReserved(self._resource_class,
                                           self._capabilities)

76
metalsmith/_utils.py Normal file
View File

@ -0,0 +1,76 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import os
import shutil
import tempfile
def log_node(node):
    """Return a human-readable identifier for a node for log messages."""
    if not node.name:
        return node.uuid
    return '%s (UUID %s)' % (node.name, node.uuid)
def get_capabilities(node):
    """Return the node's capabilities as a dict.

    Ironic may store capabilities either as a dict or as a
    "key1:value1,key2:value2" string; both forms are accepted.
    """
    caps = node.properties.get('capabilities') or {}
    if isinstance(caps, dict):
        return caps
    return dict(item.split(':', 1) for item in caps.split(',') if item)
@contextlib.contextmanager
def config_drive_dir(node, ssh_keys):
    """Create a temporary config drive directory for a node.

    Yields the directory path; the whole tree is removed on exit, even
    on error.

    :param node: Node object (its ``uuid`` and ``name`` are used).
    :param ssh_keys: list of public SSH keys to expose via metadata.
    """
    d = tempfile.mkdtemp()
    try:
        metadata = {'public_keys': ssh_keys,
                    'uuid': node.uuid,
                    'name': node.name,
                    'hostname': node.name or node.uuid,
                    'launch_index': 0,
                    'availability_zone': '',
                    'files': [],
                    'meta': {}}
        # Write both the dated and the "latest" metadata paths, matching
        # the layout the pre-refactor implementation produced.
        for version in ('2012-08-10', 'latest'):
            subdir = os.path.join(d, 'openstack', version)
            if not os.path.exists(subdir):
                os.makedirs(subdir)
            with open(os.path.join(subdir, 'meta_data.json'), 'w') as fp:
                # NOTE: removed a stray debug print(metadata) that leaked
                # the metadata (including SSH keys) to stdout on every call.
                json.dump(metadata, fp)
        yield d
    finally:
        shutil.rmtree(d)
def get_root_disk(root_disk_size, node):
    """Validate and calculate the root disk size.

    :param root_disk_size: requested root partition size (GiB) or None to
        derive it from the node's ``local_gb`` property.
    :param node: Node object, only used when root_disk_size is None.
    :return: the root disk size to use, in GiB.
    :raises: TypeError if root_disk_size is not an integer.
    :raises: ValueError if root_disk_size is not positive.
    """
    if root_disk_size is not None:
        # bool is a subclass of int, so reject it explicitly - otherwise
        # True would silently be accepted as a 1 GiB disk size.
        if isinstance(root_disk_size, bool) or \
                not isinstance(root_disk_size, int):
            raise TypeError("The root_disk_size argument must be "
                            "a positive integer, got %r" % root_disk_size)
        elif root_disk_size <= 0:
            raise ValueError("The root_disk_size argument must be "
                             "a positive integer, got %d" % root_disk_size)
    else:
        # allow for partitioning and config drive
        root_disk_size = int(node.properties['local_gb']) - 2
    return root_disk_size

View File

@ -1,239 +0,0 @@
# Copyright 2015-2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import logging
import os
import shutil
import tempfile
from ironicclient import exc as ir_exc
from oslo_utils import excutils
from metalsmith import os_api
LOG = logging.getLogger(__name__)
def _log_node(node):
if node.name:
return '%s (UUID %s)' % (node.name, node.uuid)
else:
return node.uuid
def _get_capabilities(node):
return dict(x.split(':', 1) for x in
node.properties.get('capabilities', '').split(',') if x)
@contextlib.contextmanager
def _config_drive_dir(node, ssh_keys):
d = tempfile.mkdtemp()
try:
metadata = {'public_keys': ssh_keys,
'uuid': node.uuid,
'name': node.name,
'hostname': node.name or node.uuid,
'launch_index': 0,
'availability_zone': '',
'files': [],
'meta': {}}
for version in ('2012-08-10', 'latest'):
subdir = os.path.join(d, 'openstack', version)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(os.path.join(subdir, 'meta_data.json'), 'w') as fp:
json.dump(metadata, fp)
yield d
finally:
shutil.rmtree(d)
def reserve(api, nodes, capabilities, dry_run=False):
suitable_nodes = []
for node in nodes:
caps = _get_capabilities(node)
LOG.debug('Capabilities for node %(node)s: %(cap)s',
{'node': _log_node(node), 'cap': caps})
for key, value in capabilities.items():
if caps.get(key) != value:
break
else:
suitable_nodes.append(node)
if not suitable_nodes:
raise RuntimeError('No nodes found with capabilities %s' %
capabilities)
for node in suitable_nodes:
try:
api.validate_node(node.uuid)
except RuntimeError as exc:
LOG.warning('Node %(node)s failed validation: %(err)s',
{'node': _log_node(node), 'err': exc})
continue
if not node.properties.get('local_gb'):
LOG.warning('No local_gb for node %s', _log_node(node))
continue
if dry_run:
LOG.debug('Dry run, assuming node %s reserved', _log_node(node))
return node
else:
try:
return api.update_node(node.uuid, instance_uuid=node.uuid)
except ir_exc.Conflict:
LOG.info('Node %s was occupied, proceeding with the next',
_log_node(node))
raise RuntimeError('Unable to reserve any node')
def clean_up(api, node_uuid, neutron_ports):
try:
api.update_node(node_uuid, instance_uuid=os_api.REMOVE)
except Exception:
LOG.warning('Failed to remove instance_uuid, assuming already removed')
for port_id in neutron_ports:
LOG.debug('Detaching port %(port)s from node %(node)s',
{'port': port_id, 'node': node_uuid})
try:
api.detach_port_from_node(node_uuid, port_id)
except Exception:
LOG.warning('Failed to remove VIF %(vif)s from node %(node)s, '
'assuming already removed',
{'vif': port_id, 'node': node_uuid})
LOG.debug('Deleting port %s', port_id)
try:
api.delete_port(port_id)
except Exception:
LOG.warning('Failed to delete neutron port %s', port_id)
def provision(api, node, network, image, root_disk_size=None,
ssh_keys=None, netboot=False, wait=None):
neutron_ports = []
target_caps = {'boot_option': 'netboot' if netboot else 'local'}
try:
if root_disk_size is None:
root_disk_size = node.properties.get('local_gb')
if not root_disk_size:
raise RuntimeError('No root disk size requested and local_gb '
'is empty')
# allow for partitioning and config drive
root_disk_size = int(root_disk_size) - 2
updates = {'/instance_info/ramdisk': image.ramdisk_id,
'/instance_info/kernel': image.kernel_id,
'/instance_info/image_source': image.id,
'/instance_info/root_gb': root_disk_size,
'/instance_info/capabilities': target_caps}
node = api.update_node(node.uuid, updates)
node_ports = api.list_node_ports(node.uuid)
for node_port in node_ports:
port = api.create_port(mac_address=node_port.address,
network_id=network.id)
neutron_ports.append(port.id)
LOG.debug('Created Neutron port %s', port)
api.attach_port_to_node(node.uuid, port.id)
LOG.info('Ironic port %(node_port)s (%(mac)s) associated with '
'Neutron port %(port)s',
{'node_port': node_port.uuid,
'mac': node_port.address,
'port': port.id})
api.validate_node(node.uuid, validate_deploy=True)
with _config_drive_dir(node, ssh_keys) as cd:
api.node_action(node.uuid, 'active', configdrive=cd)
LOG.info('Provisioning started on node %s', _log_node(node))
if wait is not None:
api.ironic.node.wait_for_provision_state(node.uuid, 'active',
timeout=max(0, wait))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Deploy attempt failed, cleaning up')
try:
clean_up(api, node, neutron_ports)
except Exception:
LOG.exception('Clean up failed, system needs manual clean up')
if wait is not None:
LOG.info('Deploy succeeded on node %s', _log_node(node))
def deploy(api, resource_class, image_id, network_id, root_disk_size,
ssh_keys, capabilities=None, netboot=False,
wait=None, dry_run=False):
"""Deploy an image on a given profile."""
capabilities = capabilities or {}
LOG.debug('Deploying image %(image)s on node with class %(class)s '
'and capabilities %(caps)s on network %(net)s',
{'image': image_id, 'class': resource_class,
'net': network_id, 'capabilities': capabilities})
image = api.get_image_info(image_id)
if image is None:
raise RuntimeError('Image %s does not exist' % image_id)
for im_prop in ('kernel_id', 'ramdisk_id'):
if not getattr(image, im_prop, None):
raise RuntimeError('%s property is required on image' % im_prop)
LOG.debug('Image: %s', image)
network = api.get_network(network_id)
if network is None:
raise RuntimeError('Network %s does not exist' % network_id)
LOG.debug('Network: %s', network)
nodes = api.list_nodes(resource_class=resource_class)
LOG.debug('Ironic nodes: %s', nodes)
if not nodes:
raise RuntimeError('No available nodes found with resource class %s' %
resource_class)
node = reserve(api, nodes, capabilities, dry_run=dry_run)
LOG.info('Reserved node %s', _log_node(node))
if dry_run:
LOG.warning('Dry run, not provisioning node %s', node.uuid)
return
provision(api, node, network, image, root_disk_size, ssh_keys,
netboot=netboot, wait=wait)
def undeploy(api, node_uuid, wait=None):
neutron_ports = [port.id
for port in api.list_node_attached_ports(node_uuid)]
api.node_action(node_uuid, 'deleted')
LOG.info('Deleting started for node %s', node_uuid)
if wait is not None:
api.ironic.node.wait_for_provision_state(node_uuid, 'available',
timeout=max(0, wait))
clean_up(api, node_uuid, neutron_ports)
LOG.info('Node %s undeployed successfully', node_uuid)

136
metalsmith/test/test_cmd.py Normal file
View File

@ -0,0 +1,136 @@
# Copyright 2015-2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import mock
import testtools
from metalsmith import _cmd
from metalsmith import _provisioner
@mock.patch.object(_provisioner, 'Provisioner', autospec=True)
@mock.patch.object(_cmd.generic, 'Password', autospec=True)
class TestMain(testtools.TestCase):
    """Tests for the metalsmith CLI entry point (_cmd.main)."""

    def test_args_ok(self, mock_auth, mock_pr):
        """A plain deploy invocation reserves and provisions one node."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg', 'compute']
        _cmd.main(args)
        mock_pr.assert_called_once_with(mock.ANY, dry_run=False)
        mock_pr.return_value.reserve_node.assert_called_once_with(
            resource_class='compute',
            capabilities={}
        )
        mock_pr.return_value.provision_node.assert_called_once_with(
            mock_pr.return_value.reserve_node.return_value,
            image_ref='myimg',
            network_refs=['mynet'],
            root_disk_size=None,
            ssh_keys=[],
            netboot=False,
            wait=1800)

    def test_args_debug(self, mock_auth, mock_pr):
        """--debug only changes logging; provisioning calls are identical."""
        args = ['--debug', 'deploy', '--network', 'mynet', '--image', 'myimg',
                'compute']
        _cmd.main(args)
        mock_pr.assert_called_once_with(mock.ANY, dry_run=False)
        mock_pr.return_value.reserve_node.assert_called_once_with(
            resource_class='compute',
            capabilities={}
        )
        mock_pr.return_value.provision_node.assert_called_once_with(
            mock_pr.return_value.reserve_node.return_value,
            image_ref='myimg',
            network_refs=['mynet'],
            root_disk_size=None,
            ssh_keys=[],
            netboot=False,
            wait=1800)

    def test_args_quiet(self, mock_auth, mock_pr):
        """--quiet only changes logging; provisioning calls are identical."""
        args = ['--quiet', 'deploy', '--network', 'mynet', '--image', 'myimg',
                'compute']
        _cmd.main(args)
        mock_pr.assert_called_once_with(mock.ANY, dry_run=False)
        mock_pr.return_value.reserve_node.assert_called_once_with(
            resource_class='compute',
            capabilities={}
        )
        mock_pr.return_value.provision_node.assert_called_once_with(
            mock_pr.return_value.reserve_node.return_value,
            image_ref='myimg',
            network_refs=['mynet'],
            root_disk_size=None,
            ssh_keys=[],
            netboot=False,
            wait=1800)

    @mock.patch.object(_cmd.LOG, 'critical', autospec=True)
    def test_reservation_failure(self, mock_log, mock_auth, mock_pr):
        """A reservation error is logged and exits with SystemExit."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg', 'compute']
        failure = RuntimeError('boom')
        mock_pr.return_value.reserve_node.side_effect = failure
        self.assertRaises(SystemExit, _cmd.main, args)
        # exc_info=False because --debug was not passed.
        mock_log.assert_called_once_with('%s', failure, exc_info=False)

    @mock.patch.object(_cmd.LOG, 'critical', autospec=True)
    def test_deploy_failure(self, mock_log, mock_auth, mock_pr):
        """A provisioning error is logged and exits with SystemExit."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg', 'compute']
        failure = RuntimeError('boom')
        mock_pr.return_value.provision_node.side_effect = failure
        self.assertRaises(SystemExit, _cmd.main, args)
        mock_log.assert_called_once_with('%s', failure, exc_info=False)

    def test_args_capabilities(self, mock_auth, mock_pr):
        """Repeated --capability options accumulate into one dict."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg',
                '--capability', 'foo=bar', '--capability', 'answer=42',
                'compute']
        _cmd.main(args)
        mock_pr.assert_called_once_with(mock.ANY, dry_run=False)
        mock_pr.return_value.reserve_node.assert_called_once_with(
            resource_class='compute',
            capabilities={'foo': 'bar', 'answer': '42'}
        )
        mock_pr.return_value.provision_node.assert_called_once_with(
            mock_pr.return_value.reserve_node.return_value,
            image_ref='myimg',
            network_refs=['mynet'],
            root_disk_size=None,
            ssh_keys=[],
            netboot=False,
            wait=1800)

    def test_args_configdrive(self, mock_auth, mock_pr):
        """--ssh-public-key reads the key file contents into ssh_keys."""
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(b'foo\n')
            fp.flush()
            args = ['deploy', '--network', 'mynet', '--image', 'myimg',
                    '--ssh-public-key', fp.name, 'compute']
            _cmd.main(args)
            mock_pr.assert_called_once_with(mock.ANY, dry_run=False)
            mock_pr.return_value.reserve_node.assert_called_once_with(
                resource_class='compute',
                capabilities={}
            )
            mock_pr.return_value.provision_node.assert_called_once_with(
                mock_pr.return_value.reserve_node.return_value,
                image_ref='myimg',
                network_refs=['mynet'],
                root_disk_size=None,
                ssh_keys=['foo'],
                netboot=False,
                wait=1800)

View File

@ -1,87 +0,0 @@
# Copyright 2015-2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from ironicclient import exc as ir_exc
import mock
from metalsmith import deploy
from metalsmith import os_api
class TestReserve(unittest.TestCase):
    """Tests for deploy.reserve node selection and reservation."""

    def setUp(self):
        super(TestReserve, self).setUp()
        self.api = mock.Mock(spec=os_api.API)

    def test_ok(self):
        """The first valid node is validated and reserved."""
        nodes = [mock.Mock(uuid='1', properties={'local_gb': 42}),
                 mock.Mock(uuid='2', properties={'local_gb': 42})]
        node = deploy.reserve(self.api, nodes, {})
        self.assertEqual(self.api.update_node.return_value, node)
        self.api.validate_node.assert_called_once_with('1')
        self.api.update_node.assert_called_once_with('1', instance_uuid='1')

    def test_validation_failed(self):
        """A node that fails validation is skipped for the next one."""
        nodes = [mock.Mock(uuid='1', properties={'local_gb': 42}),
                 mock.Mock(uuid='2', properties={'local_gb': 42})]
        self.api.validate_node.side_effect = [RuntimeError('boom'), None]
        node = deploy.reserve(self.api, nodes, {})
        self.assertEqual(self.api.update_node.return_value, node)
        self.assertEqual([mock.call('1'), mock.call('2')],
                         self.api.validate_node.call_args_list)
        self.api.update_node.assert_called_once_with('2', instance_uuid='2')

    def test_with_capabilities(self):
        """Only nodes advertising the requested capabilities match."""
        nodes = [mock.Mock(uuid='1', properties={'local_gb': 42}),
                 mock.Mock(uuid='2', properties={'local_gb': 42,
                                                 'capabilities': '1:2,3:4'})]
        node = deploy.reserve(self.api, nodes, {'3': '4'})
        self.assertEqual(self.api.update_node.return_value, node)
        self.api.validate_node.assert_called_once_with('2')
        self.api.update_node.assert_called_once_with('2', instance_uuid='2')

    def test_no_capabilities(self):
        """No capability match aborts before any validation/reservation."""
        nodes = [mock.Mock(uuid='1', properties={'local_gb': 42}),
                 mock.Mock(uuid='2', properties={'local_gb': 42,
                                                 'capabilities': '1:2,3:4'})]
        # assertRaisesRegex: assertRaisesRegexp is a deprecated alias
        # removed in Python 3.12.
        self.assertRaisesRegex(RuntimeError,
                               'No nodes found with capabilities',
                               deploy.reserve, self.api, nodes, {'3': '5'})
        self.assertFalse(self.api.validate_node.called)
        self.assertFalse(self.api.update_node.called)

    def test_conflict(self):
        """A Conflict on reservation retries with the next candidate."""
        nodes = [mock.Mock(uuid='1', properties={'local_gb': 42}),
                 mock.Mock(uuid='2', properties={'local_gb': 42})]
        self.api.update_node.side_effect = [ir_exc.Conflict(''), 'node']
        node = deploy.reserve(self.api, nodes, {})
        self.assertEqual('node', node)
        self.assertEqual([mock.call('1'), mock.call('2')],
                         self.api.validate_node.call_args_list)
        self.assertEqual([mock.call('1', instance_uuid='1'),
                          mock.call('2', instance_uuid='2')],
                         self.api.update_node.call_args_list)

View File

@ -1,113 +0,0 @@
# Copyright 2015-2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import mock
from metalsmith import main
@mock.patch.object(main.deploy, 'deploy', autospec=True)
@mock.patch.object(main.generic, 'Password', autospec=True)
class TestMain(unittest.TestCase):
    """Tests for the old main() entry point delegating to deploy.deploy."""

    def test_args_ok(self, mock_auth, mock_deploy):
        """A plain deploy invocation forwards defaults to deploy.deploy."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg', 'compute']
        main.main(args)
        mock_deploy.assert_called_once_with(mock.ANY,
                                            resource_class='compute',
                                            image_id='myimg',
                                            network_id='mynet',
                                            root_disk_size=None,
                                            ssh_keys=[],
                                            capabilities={},
                                            netboot=False,
                                            wait=1800,
                                            dry_run=False)

    def test_args_debug(self, mock_auth, mock_deploy):
        """--debug only changes logging; the deploy call is unchanged."""
        args = ['--debug', 'deploy', '--network', 'mynet', '--image', 'myimg',
                'compute']
        main.main(args)
        mock_deploy.assert_called_once_with(mock.ANY,
                                            resource_class='compute',
                                            image_id='myimg',
                                            network_id='mynet',
                                            root_disk_size=None,
                                            ssh_keys=[],
                                            capabilities={},
                                            netboot=False,
                                            wait=1800,
                                            dry_run=False)

    def test_args_quiet(self, mock_auth, mock_deploy):
        """--quiet only changes logging; the deploy call is unchanged."""
        args = ['--quiet', 'deploy', '--network', 'mynet', '--image', 'myimg',
                'compute']
        main.main(args)
        mock_deploy.assert_called_once_with(mock.ANY,
                                            resource_class='compute',
                                            image_id='myimg',
                                            network_id='mynet',
                                            root_disk_size=None,
                                            ssh_keys=[],
                                            capabilities={},
                                            netboot=False,
                                            wait=1800,
                                            dry_run=False)

    @mock.patch.object(main.LOG, 'critical', autospec=True)
    def test_deploy_failure(self, mock_log, mock_auth, mock_deploy):
        """A deploy error is logged critically and exits with SystemExit."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg', 'compute']
        mock_deploy.side_effect = RuntimeError('boom')
        self.assertRaises(SystemExit, main.main, args)
        # exc_info=False because --debug was not passed.
        mock_log.assert_called_once_with('%s', mock_deploy.side_effect,
                                         exc_info=False)

    def test_args_capabilities(self, mock_auth, mock_deploy):
        """Repeated --capability options accumulate into one dict."""
        args = ['deploy', '--network', 'mynet', '--image', 'myimg',
                '--capability', 'foo=bar', '--capability', 'answer=42',
                'compute']
        main.main(args)
        mock_deploy.assert_called_once_with(mock.ANY,
                                            resource_class='compute',
                                            image_id='myimg',
                                            network_id='mynet',
                                            root_disk_size=None,
                                            ssh_keys=[],
                                            capabilities={'foo': 'bar',
                                                          'answer': '42'},
                                            netboot=False,
                                            wait=1800,
                                            dry_run=False)

    def test_args_configdrive(self, mock_auth, mock_deploy):
        """--ssh-public-key reads the key file contents into ssh_keys."""
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(b'foo\n')
            fp.flush()
            args = ['deploy', '--network', 'mynet', '--image', 'myimg',
                    '--ssh-public-key', fp.name, 'compute']
            main.main(args)
            mock_deploy.assert_called_once_with(mock.ANY,
                                                resource_class='compute',
                                                image_id='myimg',
                                                network_id='mynet',
                                                root_disk_size=None,
                                                ssh_keys=['foo'],
                                                capabilities={},
                                                netboot=False,
                                                wait=1800,
                                                dry_run=False)

View File

@ -0,0 +1,83 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from metalsmith import _exceptions
from metalsmith import _provisioner
class TestReserveNode(testtools.TestCase):
    """Tests for Provisioner.reserve_node."""

    def setUp(self):
        super(TestReserveNode, self).setUp()
        # The low-level OS API is replaced by a strict mock; the
        # Provisioner under test has its _api swapped in directly.
        self.api = mock.Mock(spec=['list_nodes', 'reserve_node',
                                   'validate_node'])
        self.pr = _provisioner.Provisioner(mock.Mock())
        self.pr._api = self.api

    def test_no_nodes(self):
        """No nodes of the resource class raises ResourceClassNotFound."""
        self.api.list_nodes.return_value = []
        self.assertRaises(_exceptions.ResourceClassNotFound,
                          self.pr.reserve_node, 'control')
        self.assertFalse(self.api.reserve_node.called)

    def test_simple_ok(self):
        """A single valid candidate is reserved and returned."""
        nodes = [
            mock.Mock(spec=['uuid', 'name', 'properties'],
                      properties={'local_gb': 100})
        ]
        self.api.list_nodes.return_value = nodes
        # reserve_node passes the node straight through on success.
        self.api.reserve_node.side_effect = lambda n, instance_uuid: n
        node = self.pr.reserve_node('control')
        self.assertIn(node, nodes)

    def test_with_capabilities(self):
        """Only the node matching the requested capability is picked."""
        nodes = [
            mock.Mock(spec=['uuid', 'name', 'properties'],
                      properties={'local_gb': 100, 'capabilities': caps})
            for caps in ['answer:1', 'answer:42', None]
        ]
        expected = nodes[1]
        self.api.list_nodes.return_value = nodes
        self.api.reserve_node.side_effect = lambda n, instance_uuid: n
        node = self.pr.reserve_node('control', {'answer': '42'})
        self.assertIs(node, expected)
class TestProvisionNode(testtools.TestCase):
    """Tests for Provisioner.provision_node."""

    def setUp(self):
        super(TestProvisionNode, self).setUp()
        self.api = mock.Mock(spec=['get_node', 'get_image_info', 'get_network',
                                   'update_node', 'validate_node',
                                   'create_port', 'attach_port_to_node',
                                   'node_action', 'wait_for_active'])
        # Identity side effects: the node object is passed through the
        # get/update calls unchanged.
        self.api.get_node.side_effect = lambda n: n
        self.api.update_node.side_effect = lambda n, _u: n
        self.pr = _provisioner.Provisioner(mock.Mock())
        self.pr._api = self.api
        self.node = mock.Mock(spec=['name', 'uuid', 'properties'],
                              uuid='000', properties={'local_gb': 100})
        # Mock(name=...) sets the mock's own name, so assign separately.
        self.node.name = 'control-0'

    def test_ok(self):
        """The happy path completes without raising."""
        self.pr.provision_node(self.node, 'image', ['network'])

View File

@ -0,0 +1,263 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from metalsmith import _exceptions
from metalsmith import _scheduler
class TestScheduleNode(testtools.TestCase):
    """Tests for _scheduler.schedule_node filtering and reservation."""

    def setUp(self):
        super(TestScheduleNode, self).setUp()
        self.nodes = [mock.Mock(spec=['uuid', 'name']) for _ in range(2)]
        self.reserver = self._reserver(lambda x: x)

    def _reserver(self, side_effect):
        """Build a Reserver mock; its fail() raises unless a failure
        side effect was given, in which case fail() raises 'failed'."""
        reserver = mock.Mock(spec=_scheduler.Reserver)
        reserver.side_effect = side_effect
        if isinstance(side_effect, Exception):
            reserver.fail.side_effect = RuntimeError('failed')
        else:
            # fail() must never be called on the success paths.
            reserver.fail.side_effect = AssertionError('called fail')
        return reserver

    def _filter(self, side_effect, fail=AssertionError('called fail')):
        # NOTE(review): the default is a single shared exception instance;
        # harmless here since it is only ever raised, never mutated.
        fltr = mock.Mock(spec=_scheduler.Filter)
        fltr.side_effect = side_effect
        fltr.fail.side_effect = fail
        return fltr

    def test_no_filters(self):
        """Without filters the first node is reserved directly."""
        result = _scheduler.schedule_node(self.nodes, [], self.reserver)
        self.assertIs(result, self.nodes[0])
        self.reserver.assert_called_once_with(self.nodes[0])
        self.assertFalse(self.reserver.fail.called)

    def test_dry_run(self):
        """Dry run returns a candidate without actually reserving it."""
        result = _scheduler.schedule_node(self.nodes, [], self.reserver,
                                          dry_run=True)
        self.assertIs(result, self.nodes[0])
        self.assertFalse(self.reserver.called)
        self.assertFalse(self.reserver.fail.called)

    def test_reservation_one_failed(self):
        """A failed reservation falls through to the next candidate."""
        reserver = self._reserver([Exception("boom"), self.nodes[1]])
        result = _scheduler.schedule_node(self.nodes, [], reserver)
        self.assertIs(result, self.nodes[1])
        self.assertEqual([mock.call(n) for n in self.nodes],
                         reserver.call_args_list)

    def test_reservation_all_failed(self):
        """When every reservation fails, reserver.fail() is raised."""
        reserver = self._reserver(Exception("boom"))
        self.assertRaisesRegex(RuntimeError, 'failed',
                               _scheduler.schedule_node,
                               self.nodes, [], reserver)
        self.assertEqual([mock.call(n) for n in self.nodes],
                         reserver.call_args_list)

    def test_all_filters_pass(self):
        """All filters see all nodes; the first survivor is reserved."""
        filters = [self._filter([True, True]) for _ in range(3)]
        result = _scheduler.schedule_node(self.nodes, filters, self.reserver)
        self.assertIs(result, self.nodes[0])
        self.reserver.assert_called_once_with(self.nodes[0])
        for fltr in filters:
            self.assertEqual([mock.call(n) for n in self.nodes],
                             fltr.call_args_list)
            self.assertFalse(fltr.fail.called)

    def test_one_node_filtered(self):
        """A later filter only sees nodes earlier filters let through."""
        filters = [self._filter([True, True]),
                   self._filter([False, True]),
                   self._filter([True])]
        result = _scheduler.schedule_node(self.nodes, filters, self.reserver)
        self.assertIs(result, self.nodes[1])
        self.reserver.assert_called_once_with(self.nodes[1])
        for fltr in filters:
            self.assertFalse(fltr.fail.called)
        for fltr in filters[:2]:
            self.assertEqual([mock.call(n) for n in self.nodes],
                             fltr.call_args_list)
        filters[2].assert_called_once_with(self.nodes[1])

    def test_all_nodes_filtered(self):
        """The filter that rejected the last node reports the failure."""
        filters = [self._filter([True, True]),
                   self._filter([False, True]),
                   self._filter([False], fail=RuntimeError('failed'))]
        self.assertRaisesRegex(RuntimeError, 'failed',
                               _scheduler.schedule_node,
                               self.nodes, filters, self.reserver)
        self.assertFalse(self.reserver.called)
        for fltr in filters[:2]:
            self.assertEqual([mock.call(n) for n in self.nodes],
                             fltr.call_args_list)
            self.assertFalse(fltr.fail.called)
        filters[2].assert_called_once_with(self.nodes[1])
        filters[2].fail.assert_called_once_with()
class TestCapabilitiesFilter(testtools.TestCase):
    """Tests for _scheduler.CapabilitiesFilter matching and messages."""

    def test_fail_no_capabilities(self):
        """fail() before any call reports no existing capabilities."""
        fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute'})
        self.assertRaisesRegex(_exceptions.CapabilitiesNotFound,
                               'No available nodes found with capabilities '
                               'profile=compute, existing capabilities: none',
                               fltr.fail)

    def test_nothing_requested_nothing_found(self):
        """An empty capability request matches a capability-less node."""
        fltr = _scheduler.CapabilitiesFilter('rsc', {})
        node = mock.Mock(properties={}, spec=['properties', 'name', 'uuid'])
        self.assertTrue(fltr(node))

    def test_matching_node(self):
        """Extra node capabilities beyond the requested ones are fine."""
        fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute',
                                                     'foo': 'bar'})
        node = mock.Mock(
            properties={'capabilities': 'foo:bar,profile:compute,answer:42'},
            spec=['properties', 'name', 'uuid'])
        self.assertTrue(fltr(node))

    def test_not_matching_node(self):
        """A node missing one requested capability is rejected."""
        fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute',
                                                     'foo': 'bar'})
        node = mock.Mock(
            properties={'capabilities': 'foo:bar,answer:42'},
            spec=['properties', 'name', 'uuid'])
        self.assertFalse(fltr(node))

    def test_fail_message(self):
        """fail() summarizes the capabilities seen on rejected nodes."""
        fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute'})
        node = mock.Mock(
            properties={'capabilities': 'profile:control'},
            spec=['properties', 'name', 'uuid'])
        self.assertFalse(fltr(node))
        self.assertRaisesRegex(_exceptions.CapabilitiesNotFound,
                               'No available nodes found with capabilities '
                               'profile=compute, existing capabilities: '
                               r'profile=control \(1 node\(s\)\)',
                               fltr.fail)

    def test_malformed_capabilities(self):
        """Unparseable capability strings reject the node, not crash."""
        fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute'})
        # Missing value, non-string, too many separators.
        for cap in ['foo,profile:control', 42, 'a:b:c']:
            node = mock.Mock(properties={'capabilities': cap},
                             spec=['properties', 'name', 'uuid'])
            self.assertFalse(fltr(node))
        self.assertRaisesRegex(_exceptions.CapabilitiesNotFound,
                               'No available nodes found with capabilities '
                               'profile=compute, existing capabilities: none',
                               fltr.fail)
class TestValidationFilter(testtools.TestCase):
    """Tests for _scheduler.ValidationFilter local_gb and API checks."""

    def setUp(self):
        super(TestValidationFilter, self).setUp()
        self.api = mock.Mock(spec=['validate_node'])
        self.fltr = _scheduler.ValidationFilter(self.api, 'rsc',
                                                {'profile': 'compute'})

    def test_pass(self):
        """A node with a sane local_gb and passing validation is kept."""
        node = mock.Mock(properties={'local_gb': 100},
                         spec=['properties', 'uuid', 'name'])
        self.assertTrue(self.fltr(node))

    def test_fail_without_local_gb(self):
        """Missing local_gb rejects the node and is reported by fail()."""
        node = mock.Mock(properties={},
                         spec=['properties', 'uuid', 'name'])
        self.assertFalse(self.fltr(node))
        self.assertRaisesRegex(_exceptions.ValidationFailed,
                               'All available nodes have failed validation: '
                               'No local_gb for node',
                               self.fltr.fail)

    def test_fail_malformed_local_gb(self):
        """A non-numeric local_gb rejects the node with a clear message."""
        node = mock.Mock(properties={'local_gb': []},
                         spec=['properties', 'uuid', 'name'])
        self.assertFalse(self.fltr(node))
        self.assertRaisesRegex(_exceptions.ValidationFailed,
                               'All available nodes have failed validation: '
                               'The local_gb for node .* is invalid',
                               self.fltr.fail)

    def test_fail_validation(self):
        """An API validation error rejects the node and names the cause."""
        node = mock.Mock(properties={'local_gb': 100},
                         spec=['properties', 'uuid', 'name'])
        self.api.validate_node.side_effect = RuntimeError('boom')
        self.assertFalse(self.fltr(node))
        self.assertRaisesRegex(_exceptions.ValidationFailed,
                               'All available nodes have failed validation: '
                               'Node .* failed validation: boom',
                               self.fltr.fail)
@mock.patch.object(_scheduler, 'ValidationFilter', autospec=True)
class TestIronicReserver(testtools.TestCase):
    """Tests for _scheduler.IronicReserver reserve/validate/release."""

    def setUp(self):
        super(TestIronicReserver, self).setUp()
        self.node = mock.Mock(spec=['uuid', 'name'])
        self.api = mock.Mock(spec=['reserve_node', 'release_node'])
        # Pass the node through unchanged on a successful reservation.
        self.api.reserve_node.side_effect = lambda node, instance_uuid: node
        self.reserver = _scheduler.IronicReserver(self.api, 'rsc', {})

    def test_fail(self, mock_validation):
        """fail() raises AllNodesReserved with a helpful message."""
        self.assertRaisesRegex(_exceptions.AllNodesReserved,
                               'All the candidate nodes are already reserved',
                               self.reserver.fail)

    def test_ok(self, mock_validation):
        """A successful reservation also runs the validation filter."""
        self.assertEqual(self.node, self.reserver(self.node))
        self.api.reserve_node.assert_called_once_with(
            self.node, instance_uuid=self.node.uuid)
        mock_validation.return_value.assert_called_once_with(self.node)

    def test_reservation_failed(self, mock_validation):
        """A failed reservation propagates and skips validation."""
        self.api.reserve_node.side_effect = RuntimeError('conflict')
        self.assertRaisesRegex(RuntimeError, 'conflict',
                               self.reserver, self.node)
        self.api.reserve_node.assert_called_once_with(
            self.node, instance_uuid=self.node.uuid)
        self.assertFalse(mock_validation.return_value.called)

    def test_validation_failed(self, mock_validation):
        """A failed validation releases the freshly reserved node."""
        mock_validation.return_value.return_value = False
        mock_validation.return_value.fail.side_effect = RuntimeError('fail')
        self.assertRaisesRegex(RuntimeError, 'fail',
                               self.reserver, self.node)
        self.api.reserve_node.assert_called_once_with(
            self.node, instance_uuid=self.node.uuid)
        mock_validation.return_value.assert_called_once_with(self.node)
        self.api.release_node.assert_called_once_with(self.node)

    @mock.patch.object(_scheduler.LOG, 'exception', autospec=True)
    def test_validation_and_release_failed(self, mock_log_exc,
                                           mock_validation):
        """A release error is logged; the validation error still wins."""
        mock_validation.return_value.return_value = False
        mock_validation.return_value.fail.side_effect = RuntimeError('fail')
        self.api.release_node.side_effect = Exception()
        self.assertRaisesRegex(RuntimeError, 'fail',
                               self.reserver, self.node)
        self.api.reserve_node.assert_called_once_with(
            self.node, instance_uuid=self.node.uuid)
        mock_validation.return_value.assert_called_once_with(self.node)
        self.api.release_node.assert_called_once_with(self.node)
        self.assertTrue(mock_log_exc.called)

View File

@ -7,3 +7,4 @@ oslo.utils>=3.20.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
python-ironicclient>=1.14.0 # Apache-2.0
python-neutronclient>=6.3.0 # Apache-2.0
six>=1.10.0 # MIT

View File

@ -19,4 +19,4 @@ packages =
[entry_points]
console_scripts =
metalsmith = metalsmith.main:main
metalsmith = metalsmith._cmd:main

View File

@ -2,7 +2,8 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
coverage!=4.4,>=4.0 # Apache-2.0
doc8 # Apache-2.0
flake8-import-order==0.11 # LGPLv3
doc8>=0.6.0 # Apache-2.0
flake8-import-order>=0.13 # LGPLv3
hacking>=1.0.0 # Apache-2.0
mock>=2.0 # BSD
testtools>=2.2.0 # MIT

View File

@ -1,5 +1,5 @@
[tox]
envlist = py27,py35,pep8
envlist = py3,py27,pep8
[testenv]
usedevelop = True
@ -10,13 +10,13 @@ commands =
coverage report -m
setenv = PYTHONDONTWRITEBYTECODE=1
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY \
OS_USERNAME OS_PASSWORD OS_TENANT_NAME OS_AUTH_URL
OS_USERNAME OS_PASSWORD OS_PROJECT_NAME OS_AUTH_URL \
OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_NAME
[testenv:venv]
commands = {posargs}
[testenv:pep8]
basepython = python2
commands =
flake8 metalsmith
doc8 README.rst
@ -32,6 +32,3 @@ application-import-names = metalsmith
# [H210] Require autospec, spec, or spec_set in mock.patch/mock.patch.object calls
# [H904] Delay string interpolations at logging calls.
enable-extensions=H106,H203,H204,H205,H210,H904
[hacking]
import_exceptions = ironicclient.exceptions