Remove all mistral actions from tripleo-common
This removes the dependency on mistral-lib. Depends-On: https://review.opendev.org/c/openstack/tripleo-ansible/+/778018 Depends-On: https://review.opendev.org/c/openstack/python-tripleoclient/+/777950 Change-Id: I3c94845b2aa484ac7b4e8f444509ceac40fa289d
This commit is contained in:
parent
50a24d529b
commit
35c509602e
|
@ -13,10 +13,8 @@ oslo.utils>=3.33.0 # Apache-2.0
|
|||
python-glanceclient>=2.8.0 # Apache-2.0
|
||||
python-ironicclient!=2.5.2,!=2.7.1,!=3.0.0,>=2.3.0 # Apache-2.0
|
||||
six>=1.10.0 # MIT
|
||||
mistral-lib>=0.3.0 # Apache-2.0
|
||||
oslo.concurrency>=3.26.0 # Apache-2.0
|
||||
python-ironic-inspector-client>=1.5.0 # Apache-2.0
|
||||
python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0
|
||||
Jinja2>=2.10 # BSD License (3 clause)
|
||||
python-novaclient>=9.1.0 # Apache-2.0
|
||||
passlib>=1.7.0 # BSD
|
||||
|
|
|
@ -1,430 +0,0 @@
|
|||
# Copyright 2016 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import logging
|
||||
import socket
|
||||
import tempfile
|
||||
|
||||
import ironic_inspector_client
|
||||
from mistral_lib import actions
|
||||
import netaddr
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_utils import units
|
||||
import six
|
||||
|
||||
from tripleo_common.actions import base
|
||||
from tripleo_common import exception
|
||||
from tripleo_common.utils import glance
|
||||
from tripleo_common.utils import nodes
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RegisterOrUpdateNodes(base.TripleOAction):
    """Register Nodes Action

    :param nodes_json: list of nodes & attributes in json format
    :param remove: Should nodes not in the list be removed?
    :param kernel_name: Glance ID of the kernel to use for the nodes.
    :param ramdisk_name: Glance ID of the ramdisk to use for the nodes.
    :param instance_boot_option: Whether to set instances for booting from
                                 local hard drive (local) or network
                                 (netboot).
    :return: list of node objects representing the new nodes.
    """

    def __init__(self, nodes_json, remove=False, kernel_name=None,
                 ramdisk_name=None, instance_boot_option=None):
        super(RegisterOrUpdateNodes, self).__init__()
        # Normalize bare MAC addresses in the input into port entries.
        self.nodes_json = nodes.convert_nodes_json_mac_to_ports(nodes_json)
        self.remove = remove
        self.instance_boot_option = instance_boot_option
        self.kernel_name = kernel_name
        self.ramdisk_name = ramdisk_name

    def run(self, context):
        for node in self.nodes_json:
            caps = node.get('capabilities', {})
            caps = nodes.capabilities_to_dict(caps)
            if self.instance_boot_option is not None:
                # setdefault: a boot_option the node already declares is
                # kept; the action-level value is only a fallback here.
                caps.setdefault('boot_option', self.instance_boot_option)
            node['capabilities'] = nodes.dict_to_capabilities(caps)

        baremetal_client = self.get_baremetal_client(context)
        image_client = self.get_image_client(context)

        try:
            return nodes.register_all_nodes(
                self.nodes_json,
                client=baremetal_client,
                remove=self.remove,
                glance_client=image_client,
                kernel_name=self.kernel_name,
                ramdisk_name=self.ramdisk_name)
        except Exception as err:
            # Mistral actions report failure by returning a Result whose
            # error field is set, rather than letting the exception escape.
            LOG.exception("Error registering nodes with ironic.")
            return actions.Result(error=six.text_type(err))
|
||||
|
||||
|
||||
class ValidateNodes(base.TripleOAction):
    """Validate Nodes Action

    :param nodes_json: list of nodes & attributes in json format
    """

    def __init__(self, nodes_json):
        super(ValidateNodes, self).__init__()
        # Canonicalize bare MAC addresses into port entries before
        # validation so the checker sees a uniform representation.
        self.nodes_json = nodes.convert_nodes_json_mac_to_ports(nodes_json)

    def run(self, context):
        try:
            nodes.validate_nodes(self.nodes_json)
        except Exception as err:
            # Known validation failures get a short error line; anything
            # unexpected is logged with a full traceback. Either way the
            # failure is surfaced through the action Result.
            if isinstance(err, exception.InvalidNode):
                LOG.error("Validation of nodes failed: %s", err)
            else:
                LOG.exception("Unexpected exception during node validation")
            return actions.Result(error=str(err))
|
||||
|
||||
|
||||
class ConfigureBootAction(base.TripleOAction):
    """Configure kernel and ramdisk.

    :param node_uuid: an Ironic node UUID
    :param kernel_name: Glance name of the kernel to use for the nodes.
    :param ramdisk_name: Glance name of the ramdisk to use for the nodes.
    :param instance_boot_option: Whether to set instances for booting from
                                 local hard drive (local) or network
                                 (netboot).
    """

    def __init__(self, node_uuid, kernel_name='bm-deploy-kernel',
                 ramdisk_name='bm-deploy-ramdisk', instance_boot_option=None):
        super(ConfigureBootAction, self).__init__()
        self.node_uuid = node_uuid
        self.kernel_name = kernel_name
        self.ramdisk_name = ramdisk_name
        self.instance_boot_option = instance_boot_option

    def run(self, context):
        # Resolve the clients from the mistral context and delegate to the
        # testable helper below.
        baremetal_client = self.get_baremetal_client(context)
        image_client = self.get_image_client(context)
        return self.configure_boot(baremetal_client, image_client)

    def configure_boot(self, baremetal_client, image_client):
        """Apply deploy/rescue images and boot capability to the node.

        Returns None on success, or an error Result on failure.
        """
        try:
            image_ids = {'kernel': None, 'ramdisk': None}
            if self.kernel_name is not None and self.ramdisk_name is not None:
                # Look up (or create) the Glance images once; the IDs are
                # reused for both deploy and rescue entries below.
                image_ids = glance.create_or_find_kernel_and_ramdisk(
                    image_client, self.kernel_name, self.ramdisk_name)

            node = baremetal_client.node.get(self.node_uuid)

            capabilities = node.properties.get('capabilities', {})
            capabilities = nodes.capabilities_to_dict(capabilities)
            if self.instance_boot_option is not None:
                # Unlike node registration, an explicitly requested boot
                # option overwrites any existing capability value here.
                capabilities['boot_option'] = self.instance_boot_option
            capabilities = nodes.dict_to_capabilities(capabilities)

            # Single ironic patch covering capabilities plus deploy and
            # rescue kernel/ramdisk driver_info entries.
            baremetal_client.node.update(node.uuid, [
                {
                    'op': 'add',
                    'path': '/properties/capabilities',
                    'value': capabilities,
                },
                {
                    'op': 'add',
                    'path': '/driver_info/deploy_ramdisk',
                    'value': image_ids['ramdisk'],
                },
                {
                    'op': 'add',
                    'path': '/driver_info/deploy_kernel',
                    'value': image_ids['kernel'],
                },
                {
                    'op': 'add',
                    'path': '/driver_info/rescue_ramdisk',
                    'value': image_ids['ramdisk'],
                },
                {
                    'op': 'add',
                    'path': '/driver_info/rescue_kernel',
                    'value': image_ids['kernel'],
                },
            ])
            LOG.debug("Configuring boot option for Node %s", self.node_uuid)
        except Exception as err:
            LOG.exception("Error configuring node boot options with Ironic.")
            return actions.Result(error=six.text_type(err))
|
||||
|
||||
|
||||
class ConfigureRootDeviceAction(base.TripleOAction):
    """Configure the root device strategy.

    :param node_uuid: an Ironic node UUID
    :param root_device: Define the root device for nodes. Can be either a list
                        of device names (without /dev) to choose from or one
                        of two strategies: largest or smallest. For it to work
                        this command should be run after the introspection.
    :param minimum_size: Minimum size (in GiB) of the detected root device.
    :param overwrite: Whether to overwrite existing root device hints when
                      root-device is set.
    """

    def __init__(self, node_uuid, root_device=None, minimum_size=4,
                 overwrite=False):
        super(ConfigureRootDeviceAction, self).__init__()
        self.node_uuid = node_uuid
        self.root_device = root_device
        self.minimum_size = minimum_size
        self.overwrite = overwrite

    def run(self, context):
        # No strategy requested: nothing to configure.
        if not self.root_device:
            return

        baremetal_client = self.get_baremetal_client(context)
        inspector_client = self.get_baremetal_introspection_client(context)

        return self.configure_root_device(baremetal_client, inspector_client)

    def configure_root_device(self, baremetal_client, inspector_client):
        """Fetch the node and apply the configured strategy to it."""
        node = baremetal_client.node.get(self.node_uuid)
        self._apply_root_device_strategy(
            node,
            self.root_device,
            self.minimum_size,
            self.overwrite,
            baremetal_client, inspector_client)

    def _apply_root_device_strategy(self, node, strategy, minimum_size,
                                    overwrite=False, baremetal_client=None,
                                    inspector_client=None):
        """Pick a root disk from introspection data and set hints on the node.

        :raises exception.RootDeviceDetectionError: when introspection data
            is missing/malformed, no disk matches, or no usable hint exists.
        :raises RuntimeError: when the inspector client is too old.
        """
        if node.properties.get('root_device') and not overwrite:
            # This is a correct situation, we still want to allow people to
            # fine-tune the root device setting for a subset of nodes.
            # However, issue a warning, so that they know which nodes were not
            # updated during this run.
            LOG.warning('Root device hints are already set for node %s '
                        'and overwriting is not requested, skipping',
                        node.uuid)
            LOG.warning('You may unset them by running $ ironic '
                        'node-update %s remove properties/root_device',
                        node.uuid)
            return

        try:
            data = inspector_client.get_data(node.uuid)
        except ironic_inspector_client.ClientError:
            raise exception.RootDeviceDetectionError(
                'No introspection data found for node %s, '
                'root device cannot be detected' % node.uuid)
        except AttributeError:
            # Older inspector clients do not expose get_data at all.
            raise RuntimeError('Ironic inspector client version 1.2.0 or '
                               'newer is required for detecting root device')

        try:
            disks = data['inventory']['disks']
        except KeyError:
            raise exception.RootDeviceDetectionError(
                'Malformed introspection data for node %s: '
                'disks list is missing' % node.uuid)

        # Filter out disks below the requested minimum size (GiB -> bytes).
        minimum_size *= units.Gi
        disks = [d for d in disks if d.get('size', 0) >= minimum_size]

        if not disks:
            raise exception.RootDeviceDetectionError(
                'No suitable disks found for node %s' % node.uuid)

        if strategy == 'smallest':
            disks.sort(key=lambda d: d['size'])
            root_device = disks[0]
        elif strategy == 'largest':
            disks.sort(key=lambda d: d['size'], reverse=True)
            root_device = disks[0]
        else:
            # The strategy is a comma-separated list of device names
            # (without /dev); the first one present on the node wins.
            disk_names = [x.strip() for x in strategy.split(',')]
            disks = {d['name']: d for d in disks}
            for candidate in disk_names:
                try:
                    root_device = disks['/dev/%s' % candidate]
                except KeyError:
                    continue
                else:
                    break
            else:
                raise exception.RootDeviceDetectionError(
                    'Cannot find a disk with any of names %(strategy)s '
                    'for node %(node)s' %
                    {'strategy': strategy, 'node': node.uuid})

        # Prefer the most specific stable identifier available.
        hint = None
        for hint_name in ('wwn_with_extension', 'wwn', 'serial'):
            if root_device.get(hint_name):
                hint = {hint_name: root_device[hint_name]}
                break

        if hint is None:
            # I don't think it might actually happen, but just in case
            raise exception.RootDeviceDetectionError(
                'Neither WWN nor serial number are known for device %(dev)s '
                'on node %(node)s; root device hints cannot be used' %
                {'dev': root_device['name'], 'node': node.uuid})

        # During the introspection process we got local_gb assigned according
        # to the default strategy. Now we need to update it.
        # NOTE(review): on Python 3 this is true division, so local_gb may be
        # stored as a float — confirm ironic accepts/normalizes that.
        new_size = root_device['size'] / units.Gi
        # This -1 is what we always do to account for partitioning
        new_size -= 1

        baremetal_client.node.update(
            node.uuid,
            [{'op': 'add', 'path': '/properties/root_device', 'value': hint},
             {'op': 'add', 'path': '/properties/local_gb', 'value': new_size}])

        LOG.info('Updated root device for node %(node)s, new device '
                 'is %(dev)s, new local_gb is %(local_gb)d',
                 {'node': node.uuid, 'dev': root_device, 'local_gb': new_size})
|
||||
|
||||
|
||||
class GetCandidateNodes(base.TripleOAction):
    """Given IPs, ports and credentials, return potential new nodes.

    :param ip_addresses: either a CIDR string to expand into host IPs or an
                         explicit list of IP addresses.
    :param ports: iterable of BMC ports to try.
    :param credentials: iterable of (username, password) pairs to try.
    :param existing_nodes: already-registered node dicts used to skip BMCs
                           that are known.
    """

    def __init__(self, ip_addresses, ports, credentials, existing_nodes):
        # Fix: initialize the mistral action base class, like every other
        # action in this module does.
        super(GetCandidateNodes, self).__init__()
        self.ip_addresses = ip_addresses
        self.ports = ports
        self.credentials = credentials
        self.existing_nodes = existing_nodes

    def _existing_ips(self):
        """Return a set of (ip, port) pairs for already-registered BMCs."""
        result = set()

        for node in self.existing_nodes:
            try:
                handler = nodes.find_driver_handler(node['driver'])
            except exception.InvalidNode:
                LOG.warning('No known handler for driver %(driver)s of '
                            'node %(node)s, ignoring it',
                            {'driver': node['driver'], 'node': node['uuid']})
                continue

            address_field = handler.convert_key('pm_addr')
            if address_field is None:
                LOG.info('No address field for driver %(driver)s of '
                         'node %(node)s, ignoring it',
                         {'driver': node['driver'], 'node': node['uuid']})
                continue

            address = node['driver_info'].get(address_field)
            if address is None:
                LOG.warning('No address for node %(node)s, ignoring it',
                            {'node': node['uuid']})
                continue

            try:
                # Addresses may be hostnames; compare resolved IPs.
                ip = socket.gethostbyname(address)
            except socket.gaierror as exc:
                LOG.warning('Cannot resolve %(field)s "%(value)s" '
                            'for node %(node)s: %(error)s',
                            {'field': address_field, 'value': address,
                             'node': node['uuid'], 'error': exc})
                continue

            port_field = handler.convert_key('pm_port')
            port = node['driver_info'].get(port_field, handler.default_port)
            if port is not None:
                port = int(port)

            LOG.debug('Detected existing BMC at %s with port %s', ip, port)
            result.add((ip, port))

        return result

    def _ip_address_list(self):
        """Expand a CIDR string into host IPs; pass lists through as-is."""
        if isinstance(self.ip_addresses, six.string_types):
            return [str(ip) for ip in
                    netaddr.IPNetwork(self.ip_addresses).iter_hosts()]
        return self.ip_addresses

    def run(self, context):
        existing = self._existing_ips()
        try:
            ip_addresses = self._ip_address_list()
        except netaddr.AddrFormatError as exc:
            LOG.error("Cannot parse network address: %s", exc)
            return actions.Result(
                error="%s: %s" % (type(exc).__name__, str(exc))
            )

        result = []
        # NOTE(dtantsur): we iterate over IP addresses last to avoid
        # spamming the same BMC with too many requests in a row.
        for username, password in self.credentials:
            for port in self.ports:
                port = int(port)
                for ip in ip_addresses:
                    # A known BMC with an unknown port still matches via
                    # the (ip, None) entry.
                    if (ip, port) in existing or (ip, None) in existing:
                        LOG.info('Skipping existing node %s:%s', ip, port)
                        continue

                    result.append({'ip': ip, 'username': username,
                                   'password': password, 'port': port})

        return result
|
||||
|
||||
|
||||
class ProbeNode(base.TripleOAction):
    """Try to find BMCs on the given IP.

    :param ip: BMC IP address to probe.
    :param port: BMC port (coerced to int).
    :param username: IPMI user name to try.
    :param password: IPMI password to try (may be empty).
    :param attempts: number of ipmitool retries.
    :param ipmi_driver: driver name reported in the result dict.
    """

    def __init__(self, ip, port, username, password,
                 attempts=2, ipmi_driver='ipmi'):
        super(ProbeNode, self).__init__()
        self.ip = ip
        self.port = int(port)
        self.username = username
        self.password = password
        self.attempts = attempts
        self.ipmi_driver = ipmi_driver

    def run(self, context):
        """Run ipmitool against the BMC; return node details or None."""
        # TODO(dtantsur): redfish support
        LOG.debug('Probing for IPMI BMC: %s@%s:%s',
                  self.username, self.ip, self.port)

        # The password is passed via a temporary file (-f) so it never
        # appears on the ipmitool command line.
        with tempfile.NamedTemporaryFile(mode='wt') as fp:
            # ipmitool needs a non-empty password file; NUL stands in for
            # an empty password.
            fp.write(self.password or '\0')
            fp.flush()

            try:
                # TODO(dtantsur): try also IPMI v1.5
                processutils.execute('ipmitool', '-I', 'lanplus',
                                     '-H', self.ip, '-L', 'ADMINISTRATOR',
                                     '-p', str(self.port), '-U', self.username,
                                     '-f', fp.name, 'power', 'status',
                                     attempts=self.attempts)
            except processutils.ProcessExecutionError as exc:
                # A failed probe is expected during scanning: debug-level
                # log and no result.
                LOG.debug('Probing %(ip)s failed: %(exc)s',
                          {'ip': self.ip, 'exc': exc})
                return None

        LOG.info('Found a BMC on %(ip)s with user %(user)s',
                 {'ip': self.ip, 'user': self.username})
        return {
            'pm_type': self.ipmi_driver,
            'pm_addr': self.ip,
            'pm_user': self.username,
            'pm_password': self.password,
            'pm_port': self.port,
        }
|
|
@ -1,182 +0,0 @@
|
|||
# Copyright 2016 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from glanceclient.v2 import client as glanceclient
|
||||
from heatclient.v1 import client as heatclient
|
||||
import ironic_inspector_client
|
||||
from ironicclient import client as ironicclient
|
||||
from keystoneauth1 import session as ks_session
|
||||
from keystoneauth1.token_endpoint import Token
|
||||
from mistral_lib import actions
|
||||
from mistralclient.api import client as mistral_client
|
||||
from novaclient.client import Client as nova_client
|
||||
from swiftclient import client as swift_client
|
||||
from swiftclient import service as swift_service
|
||||
from zaqarclient.queues.v2 import client as zaqarclient
|
||||
|
||||
from tripleo_common.utils import keystone as keystone_utils
|
||||
|
||||
|
||||
class TripleOAction(actions.Action):
    """Base class for TripleO mistral actions.

    Provides factory methods that build OpenStack service clients from the
    security information carried in the mistral action context.
    """

    def __init__(self):
        super(TripleOAction, self).__init__()

    def get_session(self, context, service_name):
        """Return a keystone session for the named service."""
        session_and_auth = keystone_utils.get_session_and_auth(
            context,
            service_name=service_name
        )
        return session_and_auth['session']

    def get_object_client(self, context):
        """Return a swiftclient Connection for the context's project."""
        security_ctx = context.security

        swift_endpoint = keystone_utils.get_endpoint_for_project(
            security_ctx,
            'swift'
        )

        kwargs = {
            # The endpoint URL template is filled with the project id.
            'preauthurl': swift_endpoint.url % {
                'tenant_id': security_ctx.project_id
            },
            'session': self.get_session(security_ctx, 'swift'),
            'insecure': security_ctx.insecure,
            'retries': 10,
            'starting_backoff': 3,
            'max_backoff': 120
        }
        return swift_client.Connection(**kwargs)

    # This version returns the SwiftService API
    def get_object_service(self, context):
        swift_endpoint = keystone_utils.get_endpoint_for_project(
            context, 'swift')

        swift_opts = {
            'os_storage_url': swift_endpoint.url % {
                'tenant_id': context.project_id
            },
            'os_auth_token': context.auth_token,
            'os_region_name': swift_endpoint.region,
            'os_project_id': context.security.project_id,
        }

        return swift_service.SwiftService(options=swift_opts)

    def get_baremetal_client(self, context):
        """Return an ironic client bound to the context's token."""
        security_ctx = context.security
        ironic_endpoint = keystone_utils.get_endpoint_for_project(
            security_ctx, 'ironic')

        return ironicclient.get_client(
            1,
            endpoint=ironic_endpoint.url,
            token=security_ctx.auth_token,
            region_name=ironic_endpoint.region,
            # 1.58 for allocations backfill
            os_ironic_api_version='1.58',
            # FIXME(lucasagomes):Paramtetize max_retries and
            # max_interval. At the moment since we are dealing with
            # a critical bug (#1612622) let's just hardcode the times
            # here since the right fix does involve multiple projects
            # (tripleo-ci and python-tripleoclient beyong tripleo-common)
            max_retries=12,
            retry_interval=5,
        )

    def get_baremetal_introspection_client(self, context):
        """Return an ironic-inspector client using token auth."""
        security_ctx = context.security
        bmi_endpoint = keystone_utils.get_endpoint_for_project(
            security_ctx, 'ironic-inspector')

        auth = Token(endpoint=bmi_endpoint.url, token=security_ctx.auth_token)

        return ironic_inspector_client.ClientV1(
            api_version='1.2',
            region_name=bmi_endpoint.region,
            session=ks_session.Session(auth)
        )

    def get_image_client(self, context):
        """Return a glance client, or None when no endpoint is available."""
        security_ctx = context.security
        try:
            glance_endpoint = keystone_utils.get_endpoint_for_project(
                security_ctx, 'glance')
        except Exception:
            # Best effort: callers treat a missing image service as
            # "no image client".
            return None

        return glanceclient.Client(
            glance_endpoint.url,
            token=security_ctx.auth_token,
            region_name=glance_endpoint.region
        )

    def get_orchestration_client(self, context):
        """Return a heat client for the context's project."""
        security_ctx = context.security
        heat_endpoint = keystone_utils.get_endpoint_for_project(
            security_ctx, 'heat')

        endpoint_url = keystone_utils.format_url(
            heat_endpoint.url,
            {'tenant_id': security_ctx.project_id}
        )

        return heatclient.Client(
            endpoint_url,
            region_name=heat_endpoint.region,
            token=security_ctx.auth_token,
            username=security_ctx.user_name
        )

    def get_messaging_client(self, context):
        """Return a zaqar client configured with keystone auth options."""
        zaqar_endpoint = keystone_utils.get_endpoint_for_project(
            context, service_type='messaging')

        # Fall back to the service configuration when the context carries
        # no auth URI.
        auth_uri = context.security.auth_uri or \
            keystone_utils.CONF.keystone_authtoken.auth_uri

        opts = {
            'os_auth_token': context.security.auth_token,
            'os_auth_url': auth_uri,
            'os_project_id': context.security.project_id,
            'insecure': context.security.insecure,
        }
        auth_opts = {'backend': 'keystone', 'options': opts, }
        conf = {'auth_opts': auth_opts,
                'session': self.get_session(context, 'zaqar')}

        return zaqarclient.Client(zaqar_endpoint.url, conf=conf)

    def get_workflow_client(self, context):
        """Return a mistral client bound to the context's token."""
        security_ctx = context.security
        mistral_endpoint = keystone_utils.get_endpoint_for_project(
            security_ctx, 'mistral')

        mc = mistral_client.client(auth_token=security_ctx.auth_token,
                                   mistral_url=mistral_endpoint.url)

        return mc

    def get_compute_client(self, context):
        """Return a nova client (API v2) using a keystone session."""
        security_ctx = context.security

        conf = keystone_utils.get_session_and_auth(
            security_ctx,
            service_type='compute'
        )

        return nova_client(2, **conf)
|
|
@ -1,492 +0,0 @@
|
|||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from unittest import mock
|
||||
|
||||
from glanceclient import exc as glance_exceptions
|
||||
import ironic_inspector_client
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_utils import units
|
||||
|
||||
from tripleo_common.actions import baremetal
|
||||
from tripleo_common import exception
|
||||
from tripleo_common.tests import base
|
||||
|
||||
|
||||
class TestConfigureBootAction(base.TestCase):
    """Unit tests for baremetal.ConfigureBootAction."""

    def setUp(self):
        super(TestConfigureBootAction, self).setUp()
        # Expected ironic node patch produced by the action with the
        # default deploy images; individual tests tweak entries as needed.
        self.node_update = [{'op': 'add',
                             'path': '/properties/capabilities',
                             'value': 'boot_option:local'},
                            {'op': 'add',
                             'path': '/driver_info/deploy_ramdisk',
                             'value': 'r_id'},
                            {'op': 'add',
                             'path': '/driver_info/deploy_kernel',
                             'value': 'k_id'},
                            {'op': 'add',
                             'path': '/driver_info/rescue_ramdisk',
                             'value': 'r_id'},
                            {'op': 'add',
                             'path': '/driver_info/rescue_kernel',
                             'value': 'k_id'}]

        # Replace the ironic client factory on the action base class with
        # a MagicMock so node.get/node.update calls can be inspected.
        self.ironic = mock.MagicMock()
        ironic_patcher = mock.patch(
            'tripleo_common.actions.base.TripleOAction.get_baremetal_client',
            return_value=self.ironic)
        self.mock_ironic = ironic_patcher.start()
        self.addCleanup(ironic_patcher.stop)

        # Same for the glance client factory.
        self.glance = mock.MagicMock()
        glance_patcher = mock.patch(
            'tripleo_common.actions.base.TripleOAction.get_image_client',
            return_value=self.glance)
        self.mock_glance = glance_patcher.start()
        self.addCleanup(glance_patcher.stop)

        # images.find resolves the two well-known deploy image names to
        # fixed IDs; anything else falls through to None.
        def mock_find(name, disk_format):
            if name == 'bm-deploy-kernel':
                return mock.MagicMock(id='k_id')
            if name == 'bm-deploy-ramdisk':
                return mock.MagicMock(id='r_id')
        self.glance.images.find = mock_find
        self.context = mock.MagicMock()

    def test_run_instance_boot_option(self):
        # An explicit boot option is written into the capabilities patch.
        action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID',
                                               instance_boot_option='netboot')
        result = action.run(self.context)
        self.assertIsNone(result)

        self.node_update[0].update({'value': 'boot_option:netboot'})
        self.ironic.node.update.assert_called_once_with(mock.ANY,
                                                        self.node_update)

    def test_run_instance_boot_option_not_set(self):
        # Without a boot option the capabilities value stays empty.
        action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID')
        result = action.run(self.context)
        self.assertIsNone(result)

        self.node_update[0].update({'value': ''})
        self.ironic.node.update.assert_called_once_with(mock.ANY,
                                                        self.node_update)

    def test_run_instance_boot_option_already_set_no_overwrite(self):
        # A boot_option already on the node is preserved when the action
        # does not request one.
        node_mock = mock.MagicMock()
        node_mock.properties.get.return_value = ({'boot_option': 'netboot'})
        self.ironic.node.get.return_value = node_mock

        action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID')
        result = action.run(self.context)
        self.assertIsNone(result)

        self.node_update[0].update({'value': 'boot_option:netboot'})
        self.ironic.node.update.assert_called_once_with(mock.ANY,
                                                        self.node_update)

    def test_run_instance_boot_option_already_set_do_overwrite(self):
        # An explicitly requested boot option replaces the existing one.
        node_mock = mock.MagicMock()
        node_mock.properties.get.return_value = ({'boot_option': 'netboot'})
        self.ironic.node.get.return_value = node_mock

        action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID',
                                               instance_boot_option='local')
        result = action.run(self.context)
        self.assertIsNone(result)

        self.node_update[0].update({'value': 'boot_option:local'})
        self.ironic.node.update.assert_called_once_with(mock.ANY,
                                                        self.node_update)

    def test_run_new_kernel_and_ram_image(self):
        # Custom image names are resolved through the glance helper and the
        # returned IDs end up in the driver_info patch entries.
        image_ids = {'kernel': 'test_kernel_id', 'ramdisk': 'test_ramdisk_id'}

        with mock.patch('tripleo_common.utils.glance.create_or_find_kernel_and'
                        '_ramdisk') as mock_find:
            mock_find.return_value = image_ids
            action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID',
                                                   kernel_name='test_kernel',
                                                   ramdisk_name='test_ramdisk')
            result = action.run(self.context)

        self.assertIsNone(result)

        self.node_update[0].update({'value': ''})
        self.node_update[1:] = [{'op': 'add',
                                 'path': '/driver_info/deploy_ramdisk',
                                 'value': 'test_ramdisk_id'},
                                {'op': 'add',
                                 'path': '/driver_info/deploy_kernel',
                                 'value': 'test_kernel_id'},
                                {'op': 'add',
                                 'path': '/driver_info/rescue_ramdisk',
                                 'value': 'test_ramdisk_id'},
                                {'op': 'add',
                                 'path': '/driver_info/rescue_kernel',
                                 'value': 'test_kernel_id'}]
        self.ironic.node.update.assert_called_once_with(mock.ANY,
                                                        self.node_update)

    def test_run_glance_ids_not_found(self):
        # A missing glance image surfaces as an error Result, not a raise.
        self.glance.images.find = mock.Mock(
            side_effect=glance_exceptions.NotFound)

        action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID',
                                               kernel_name='unknown_kernel',
                                               ramdisk_name='unknown_ramdisk')
        result = action.run(self.context)
        self.assertIn("not found", str(result.error))

    def test_run_exception_on_node_update(self):
        # An ironic update failure also surfaces as an error Result.
        self.ironic.node.update.side_effect = Exception("Update error")

        action = baremetal.ConfigureBootAction(node_uuid='MOCK_UUID')
        result = action.run(self.context)

        self.assertIn("Update error", str(result.error))
|
||||
|
||||
|
||||
class TestConfigureRootDeviceAction(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestConfigureRootDeviceAction, self).setUp()
|
||||
|
||||
# Mock data
|
||||
self.disks = [
|
||||
{'name': '/dev/sda', 'size': 11 * units.Gi},
|
||||
{'name': '/dev/sdb', 'size': 2 * units.Gi},
|
||||
{'name': '/dev/sdc', 'size': 5 * units.Gi},
|
||||
{'name': '/dev/sdd', 'size': 21 * units.Gi},
|
||||
{'name': '/dev/sde', 'size': 13 * units.Gi},
|
||||
]
|
||||
for i, disk in enumerate(self.disks):
|
||||
disk['wwn'] = 'wwn%d' % i
|
||||
disk['serial'] = 'serial%d' % i
|
||||
|
||||
# Ironic mocks
|
||||
self.ironic = mock.MagicMock()
|
||||
ironic_patcher = mock.patch(
|
||||
'tripleo_common.actions.base.TripleOAction.get_baremetal_client',
|
||||
return_value=self.ironic)
|
||||
self.mock_ironic = ironic_patcher.start()
|
||||
self.addCleanup(ironic_patcher.stop)
|
||||
|
||||
self.ironic.node.list.return_value = [
|
||||
mock.Mock(uuid="ABCDEFGH"),
|
||||
]
|
||||
|
||||
self.node = mock.Mock(uuid="ABCDEFGH", properties={})
|
||||
self.ironic.node.get.return_value = self.node
|
||||
|
||||
# inspector mocks
|
||||
self.inspector = mock.MagicMock()
|
||||
inspector_patcher = mock.patch(
|
||||
'tripleo_common.actions.base.TripleOAction.'
|
||||
'get_baremetal_introspection_client',
|
||||
return_value=self.inspector)
|
||||
self.mock_inspector = inspector_patcher.start()
|
||||
self.addCleanup(inspector_patcher.stop)
|
||||
|
||||
self.inspector.get_data.return_value = {
|
||||
'inventory': {'disks': self.disks}
|
||||
}
|
||||
self.context = mock.MagicMock()
|
||||
|
||||
def test_smallest(self):
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn': 'wwn2'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 4}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_smallest_with_ext(self):
|
||||
self.disks[2]['wwn_with_extension'] = 'wwnext'
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn_with_extension': 'wwnext'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 4}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_largest(self):
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='largest')
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn': 'wwn3'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 20}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_largest_with_ext(self):
|
||||
self.disks[3]['wwn_with_extension'] = 'wwnext'
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='largest')
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn_with_extension': 'wwnext'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 20}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_no_overwrite(self):
|
||||
self.node.properties['root_device'] = {'foo': 'bar'}
|
||||
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 0)
|
||||
|
||||
def test_with_overwrite(self):
|
||||
self.node.properties['root_device'] = {'foo': 'bar'}
|
||||
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest',
|
||||
overwrite=True)
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn': 'wwn2'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 4}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_minimum_size(self):
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest',
|
||||
minimum_size=10)
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn': 'wwn0'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 10}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_bad_inventory(self):
|
||||
self.inspector.get_data.return_value = {}
|
||||
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
self.assertRaisesRegex(exception.RootDeviceDetectionError,
|
||||
"Malformed introspection data",
|
||||
action.run,
|
||||
self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 0)
|
||||
|
||||
def test_no_disks(self):
|
||||
self.inspector.get_data.return_value = {
|
||||
'inventory': {
|
||||
'disks': [{'name': '/dev/sda', 'size': 1 * units.Gi}]
|
||||
}
|
||||
}
|
||||
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
self.assertRaisesRegex(exception.RootDeviceDetectionError,
|
||||
"No suitable disks",
|
||||
action.run,
|
||||
self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 0)
|
||||
|
||||
def test_no_data(self):
|
||||
self.inspector.get_data.side_effect = (
|
||||
ironic_inspector_client.ClientError(mock.Mock()))
|
||||
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
self.assertRaisesRegex(exception.RootDeviceDetectionError,
|
||||
"No introspection data",
|
||||
action.run,
|
||||
self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 0)
|
||||
|
||||
def test_no_wwn_and_serial(self):
|
||||
self.inspector.get_data.return_value = {
|
||||
'inventory': {
|
||||
'disks': [{'name': '/dev/sda', 'size': 10 * units.Gi}]
|
||||
}
|
||||
}
|
||||
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='smallest')
|
||||
self.assertRaisesRegex(exception.RootDeviceDetectionError,
|
||||
"Neither WWN nor serial number are known",
|
||||
action.run,
|
||||
self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 0)
|
||||
|
||||
def test_device_list(self):
|
||||
action = baremetal.ConfigureRootDeviceAction(
|
||||
node_uuid='MOCK_UUID',
|
||||
root_device='hda,sda,sdb,sdc')
|
||||
action.run(self.context)
|
||||
|
||||
self.assertEqual(self.ironic.node.update.call_count, 1)
|
||||
root_device_args = self.ironic.node.update.call_args_list[0]
|
||||
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
|
||||
'value': {'wwn': 'wwn0'}},
|
||||
{'op': 'add', 'path': '/properties/local_gb',
|
||||
'value': 10}]
|
||||
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
|
||||
root_device_args)
|
||||
|
||||
def test_device_list_not_found(self):
|
||||
action = baremetal.ConfigureRootDeviceAction(node_uuid='MOCK_UUID',
|
||||
root_device='hda')
|
||||
|
||||
self.assertRaisesRegex(exception.RootDeviceDetectionError,
|
||||
"Cannot find a disk",
|
||||
action.run,
|
||||
self.context)
|
||||
self.assertEqual(self.ironic.node.update.call_count, 0)
|
||||
|
||||
|
||||
@mock.patch.object(baremetal.socket, 'gethostbyname', lambda x: x)
|
||||
class TestGetCandidateNodes(base.TestCase):
|
||||
def setUp(self):
|
||||
super(TestGetCandidateNodes, self).setUp()
|
||||
self.existing_nodes = [
|
||||
{'uuid': '1', 'driver': 'ipmi',
|
||||
'driver_info': {'ipmi_address': '10.0.0.1'}},
|
||||
{'uuid': '2', 'driver': 'pxe_ipmitool',
|
||||
'driver_info': {'ipmi_address': '10.0.0.1', 'ipmi_port': 6235}},
|
||||
{'uuid': '3', 'driver': 'foobar', 'driver_info': {}},
|
||||
{'uuid': '4', 'driver': 'fake',
|
||||
'driver_info': {'fake_address': 42}},
|
||||
{'uuid': '5', 'driver': 'ipmi', 'driver_info': {}},
|
||||
{'uuid': '6', 'driver': 'pxe_drac',
|
||||
'driver_info': {'drac_address': '10.0.0.2'}},
|
||||
{'uuid': '7', 'driver': 'pxe_drac',
|
||||
'driver_info': {'drac_address': '10.0.0.3', 'drac_port': 6230}},
|
||||
]
|
||||
|
||||
def test_existing_ips(self):
|
||||
action = baremetal.GetCandidateNodes([], [], [], self.existing_nodes)
|
||||
result = action._existing_ips()
|
||||
|
||||
self.assertEqual({('10.0.0.1', 623), ('10.0.0.1', 6235),
|
||||
('10.0.0.2', None), ('10.0.0.3', 6230)},
|
||||
set(result))
|
||||
|
||||
def test_with_list(self):
|
||||
action = baremetal.GetCandidateNodes(
|
||||
['10.0.0.1', '10.0.0.2', '10.0.0.3'],
|
||||
[623, 6230, 6235],
|
||||
[['admin', 'password'], ['admin', 'admin']],
|
||||
self.existing_nodes)
|
||||
result = action.run(mock.Mock())
|
||||
|
||||
self.assertEqual([
|
||||
{'ip': '10.0.0.3', 'port': 623,
|
||||
'username': 'admin', 'password': 'password'},
|
||||
{'ip': '10.0.0.1', 'port': 6230,
|
||||
'username': 'admin', 'password': 'password'},
|
||||
{'ip': '10.0.0.3', 'port': 6235,
|
||||
'username': 'admin', 'password': 'password'},
|
||||
{'ip': '10.0.0.3', 'port': 623,
|
||||
'username': 'admin', 'password': 'admin'},
|
||||
{'ip': '10.0.0.1', 'port': 6230,
|
||||
'username': 'admin', 'password': 'admin'},
|
||||
{'ip': '10.0.0.3', 'port': 6235,
|
||||
'username': 'admin', 'password': 'admin'},
|
||||
], result)
|
||||
|
||||
def test_with_subnet(self):
|
||||
action = baremetal.GetCandidateNodes(
|
||||
'10.0.0.0/30',
|
||||
[623, 6230, 6235],
|
||||
[['admin', 'password'], ['admin', 'admin']],
|
||||
self.existing_nodes)
|
||||
result = action.run(mock.Mock())
|
||||
|
||||
self.assertEqual([
|
||||
{'ip': '10.0.0.1', 'port': 6230,
|
||||
'username': 'admin', 'password': 'password'},
|
||||
{'ip': '10.0.0.1', 'port': 6230,
|
||||
'username': 'admin', 'password': 'admin'},
|
||||
], result)
|
||||
|
||||
def test_invalid_subnet(self):
|
||||
action = baremetal.GetCandidateNodes(
|
||||
'meow',
|
||||
[623, 6230, 6235],
|
||||
[['admin', 'password'], ['admin', 'admin']],
|
||||
self.existing_nodes)
|
||||
result = action.run(mock.Mock())
|
||||
self.assertTrue(result.is_error())
|
||||
|
||||
|
||||
@mock.patch.object(processutils, 'execute', autospec=True)
|
||||
class TestProbeNode(base.TestCase):
|
||||
action = baremetal.ProbeNode('10.0.0.42', 623, 'admin', 'password')
|
||||
|
||||
def test_success(self, mock_execute):
|
||||
result = self.action.run(mock.Mock())
|
||||
self.assertEqual({'pm_type': 'ipmi',
|
||||
'pm_addr': '10.0.0.42',
|
||||
'pm_user': 'admin',
|
||||
'pm_password': 'password',
|
||||
'pm_port': 623},
|
||||
result)
|
||||
mock_execute.assert_called_once_with('ipmitool', '-I', 'lanplus',
|
||||
'-H', '10.0.0.42',
|
||||
'-L', 'ADMINISTRATOR',
|
||||
'-p', '623', '-U', 'admin',
|
||||
'-f', mock.ANY, 'power', 'status',
|
||||
attempts=2)
|
||||
|
||||
def test_failure(self, mock_execute):
|
||||
mock_execute.side_effect = processutils.ProcessExecutionError()
|
||||
self.assertIsNone(self.action.run(mock.Mock()))
|
||||
mock_execute.assert_called_once_with('ipmitool', '-I', 'lanplus',
|
||||
'-H', '10.0.0.42',
|
||||
'-L', 'ADMINISTRATOR',
|
||||
'-p', '623', '-U', 'admin',
|
||||
'-f', mock.ANY, 'power', 'status',
|
||||
attempts=2)
|
|
@ -1,42 +0,0 @@
|
|||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from ironicclient import client as ironicclient
|
||||
|
||||
from tripleo_common.actions import base
|
||||
from tripleo_common.tests import base as tests_base
|
||||
from tripleo_common.utils import keystone as keystone_utils
|
||||
|
||||
|
||||
@mock.patch.object(keystone_utils, 'get_endpoint_for_project')
|
||||
class TestActionsBase(tests_base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestActionsBase, self).setUp()
|
||||
self.action = base.TripleOAction()
|
||||
|
||||
@mock.patch.object(ironicclient, 'get_client', autospec=True)
|
||||
def test_get_baremetal_client(self, mock_client, mock_endpoint):
|
||||
mock_cxt = mock.MagicMock()
|
||||
mock_endpoint.return_value = mock.Mock(
|
||||
url='http://ironic/v1', region='ironic-region')
|
||||
self.action.get_baremetal_client(mock_cxt)
|
||||
mock_client.assert_called_once_with(
|
||||
1, endpoint='http://ironic/v1', max_retries=12,
|
||||
os_ironic_api_version='1.58', region_name='ironic-region',
|
||||
retry_interval=5, token=mock.ANY)
|
||||
mock_endpoint.assert_called_once_with(mock_cxt.security, 'ironic')
|
||||
mock_cxt.assert_not_called()
|
Loading…
Reference in New Issue