Add ansible deploy interface

This patch pulls the ansible deploy interface code
and the related ansible playbooks and auxiliary files
from the ironic-staging-drivers project into the main ironic tree.

As discussed in the spec, the use of ramdisk callbacks
(lookup and heartbeats) is now mandatory with this deploy interface.
Playbooks and modules were updated to require Ansible>=2.4,
and custom Ansible module for executing 'parted' was replaced
with usage of built-in Ansible module.

The custom Ansible callback plugin now uses the journald logger
by default to match the default DevStack setup.

Documentation and devstack plugin changes enabling automated
testing of this interface will be proposed in follow-up patches.
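
For reference, with this patch the interface can be enabled like any
other deploy interface, e.g. (a minimal sketch; the exact hardware
types and interface list depend on the deployment):

    [DEFAULT]
    enabled_hardware_types = ipmi
    enabled_deploy_interfaces = iscsi,direct,ansible

and selected per node, e.g. with
"openstack baremetal node set <node> --deploy-interface ansible".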

Change-Id: I43f54688287953ccb1c2836437aea76236e6560b
Related-Bug: #1526308
Pavlo Shchelokovskyy 2017-10-09 07:09:31 +00:00
parent 9175c2325e
commit 9b1a7ceb4b
44 changed files with 2630 additions and 2 deletions

View File

@@ -100,7 +100,9 @@ pygments_style = 'sphinx'
# A list of glob-style patterns that should be excluded when looking for
# source files. They are matched against the source file names relative to the
# source directory, using slashes as directory separators on all platforms.
exclude_patterns = ['api/ironic_tempest_plugin.*']
exclude_patterns = ['api/ironic_tempest_plugin.*',
'api/ironic.drivers.modules.ansible.playbooks.*',
'api/ironic.tests.*']
# Ignore the following warning: WARNING: while setting up extension
# wsmeext.sphinxext: directive 'autoattribute' is already registered,

View File

@@ -18,3 +18,6 @@ ImcSdk>=0.7.2
# The Redfish hardware type uses the Sushy library
sushy
# Ansible-deploy interface
ansible>=2.4

View File

@@ -813,6 +813,78 @@
#deploy_logs_swift_days_to_expire = 30
[ansible]
#
# From ironic
#
# Extra arguments to pass on every invocation of Ansible.
# (string value)
#ansible_extra_args = <None>
# Set ansible verbosity level requested when invoking
# "ansible-playbook" command. 4 includes detailed SSH session
# logging. Default is 4 when global debug is enabled and 0
# otherwise. (integer value)
# Minimum value: 0
# Maximum value: 4
#verbosity = <None>
# Path to "ansible-playbook" script. Default will search the
# $PATH configured for user running ironic-conductor process.
# Provide the full path when ansible-playbook is not in $PATH
# or installed in a non-default location. (string value)
#ansible_playbook_script = ansible-playbook
# Path to directory with playbooks, roles and local inventory.
# (string value)
#playbooks_path = $pybasedir/drivers/modules/ansible/playbooks
# Path to ansible configuration file. If set to empty, system
# default will be used. (string value)
#config_file_path = $pybasedir/drivers/modules/ansible/playbooks/ansible.cfg
# Number of times to retry getting power state to check if
# bare metal node has been powered off after a soft power off.
# Value of 0 means do not retry on failure. (integer value)
# Minimum value: 0
#post_deploy_get_power_state_retries = 6
# Amount of time (in seconds) to wait between polling power
# state after triggering soft poweroff. (integer value)
# Minimum value: 0
#post_deploy_get_power_state_retry_interval = 5
# Extra amount of memory in MiB expected to be consumed by
# Ansible-related processes on the node. Affects decision
# whether image will fit into RAM. (integer value)
#extra_memory = 10
# Skip verifying SSL connections to the image store when
# downloading the image. Setting it to "True" is only
# recommended for testing environments that use self-signed
# certificates. (boolean value)
#image_store_insecure = false
# Specific CA bundle to use for validating SSL connections to
# the image store. If not specified, CA available in the
# ramdisk will be used. Is not used by default playbooks
# included with the driver. Suitable for environments that use
# self-signed certificates. (string value)
#image_store_cafile = <None>
# Client cert to use for SSL connections to image store. Is
# not used by default playbooks included with the driver.
# (string value)
#image_store_certfile = <None>
# Client key to use for SSL connections to image store. Is not
# used by default playbooks included with the driver. (string
# value)
#image_store_keyfile = <None>
[api]
#

View File

@@ -16,6 +16,7 @@
from oslo_config import cfg
from ironic.conf import agent
from ironic.conf import ansible
from ironic.conf import api
from ironic.conf import audit
from ironic.conf import cinder
@@ -47,6 +48,7 @@ from ironic.conf import swift
CONF = cfg.CONF
agent.register_opts(CONF)
ansible.register_opts(CONF)
api.register_opts(CONF)
audit.register_opts(CONF)
cinder.register_opts(CONF)

ironic/conf/ansible.py
View File

@@ -0,0 +1,96 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.StrOpt('ansible_extra_args',
help=_('Extra arguments to pass on every '
'invocation of Ansible.')),
cfg.IntOpt('verbosity',
min=0,
max=4,
help=_('Set ansible verbosity level requested when invoking '
'"ansible-playbook" command. '
'4 includes detailed SSH session logging. '
'Default is 4 when global debug is enabled '
'and 0 otherwise.')),
cfg.StrOpt('ansible_playbook_script',
default='ansible-playbook',
help=_('Path to "ansible-playbook" script. '
'Default will search the $PATH configured for user '
'running ironic-conductor process. '
'Provide the full path when ansible-playbook is not in '
'$PATH or installed in a non-default location.')),
cfg.StrOpt('playbooks_path',
default=os.path.join('$pybasedir',
'drivers/modules/ansible/playbooks'),
help=_('Path to directory with playbooks, roles and '
'local inventory.')),
cfg.StrOpt('config_file_path',
default=os.path.join(
'$pybasedir',
'drivers/modules/ansible/playbooks/ansible.cfg'),
help=_('Path to ansible configuration file. If set to empty, '
'system default will be used.')),
cfg.IntOpt('post_deploy_get_power_state_retries',
min=0,
default=6,
help=_('Number of times to retry getting power state to check '
'if bare metal node has been powered off after a soft '
'power off. Value of 0 means do not retry on failure.')),
cfg.IntOpt('post_deploy_get_power_state_retry_interval',
min=0,
default=5,
help=_('Amount of time (in seconds) to wait between polling '
'power state after triggering soft poweroff.')),
cfg.IntOpt('extra_memory',
default=10,
help=_('Extra amount of memory in MiB expected to be consumed '
'by Ansible-related processes on the node. Affects '
'decision whether image will fit into RAM.')),
cfg.BoolOpt('image_store_insecure',
default=False,
help=_('Skip verifying SSL connections to the image store '
'when downloading the image. '
'Setting it to "True" is only recommended for testing '
'environments that use self-signed certificates.')),
cfg.StrOpt('image_store_cafile',
help=_('Specific CA bundle to use for validating '
'SSL connections to the image store. '
'If not specified, CA available in the ramdisk '
'will be used. '
'Is not used by default playbooks included with '
'the driver. '
'Suitable for environments that use self-signed '
'certificates.')),
cfg.StrOpt('image_store_certfile',
help=_('Client cert to use for SSL connections '
'to image store. '
'Is not used by default playbooks included with '
'the driver.')),
cfg.StrOpt('image_store_keyfile',
help=_('Client key to use for SSL connections '
'to image store. '
'Is not used by default playbooks included with '
'the driver.')),
]
def register_opts(conf):
conf.register_opts(opts, group='ansible')

View File

@@ -34,6 +34,7 @@ _default_opt_lists = [
_opts = [
('DEFAULT', itertools.chain(*_default_opt_lists)),
('agent', ironic.conf.agent.opts),
('ansible', ironic.conf.ansible.opts),
('api', ironic.conf.api.opts),
('audit', ironic.conf.audit.opts),
('cimc', ironic.conf.cisco.cimc_opts),

View File

@@ -18,6 +18,7 @@ Generic hardware types.
from ironic.drivers import hardware_type
from ironic.drivers.modules import agent
from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
@@ -45,7 +46,8 @@ class GenericHardware(hardware_type.AbstractHardwareType):
@property
def supported_deploy_interfaces(self):
"""List of supported deploy interfaces."""
return [iscsi_deploy.ISCSIDeploy, agent.AgentDeploy]
return [iscsi_deploy.ISCSIDeploy, agent.AgentDeploy,
ansible_deploy.AnsibleDeploy]
@property
def supported_inspect_interfaces(self):

View File

@@ -0,0 +1,619 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ansible deploy interface
"""
import json
import os
import shlex
from ironic_lib import metrics_utils
from ironic_lib import utils as irlib_utils
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import retrying
import six
import six.moves.urllib.parse as urlparse
import yaml
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor as agent_base
from ironic.drivers.modules import deploy_utils
LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
DEFAULT_PLAYBOOKS = {
'deploy': 'deploy.yaml',
'shutdown': 'shutdown.yaml',
'clean': 'clean.yaml'
}
DEFAULT_CLEAN_STEPS = 'clean_steps.yaml'
OPTIONAL_PROPERTIES = {
'ansible_deploy_username': _('Deploy ramdisk username for Ansible. '
'This user must have passwordless sudo '
'permissions. Default is "ansible". '
'Optional.'),
'ansible_deploy_key_file': _('Path to private key file. If not specified, '
'default keys for user running '
'ironic-conductor process will be used. '
'Note that for keys with password, those '
'must be pre-loaded into ssh-agent. '
'Optional.'),
'ansible_deploy_playbook': _('Name of the Ansible playbook used for '
'deployment. Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['deploy'],
'ansible_shutdown_playbook': _('Name of the Ansible playbook used to '
'power off the node in-band. '
'Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['shutdown'],
'ansible_clean_playbook': _('Name of the Ansible playbook used for '
'cleaning. Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['clean'],
'ansible_clean_steps_config': _('Name of the file with default cleaning '
'steps configuration. Default is %s. '
'Optional.'
) % DEFAULT_CLEAN_STEPS
}
COMMON_PROPERTIES = OPTIONAL_PROPERTIES
INVENTORY_FILE = os.path.join(CONF.ansible.playbooks_path, 'inventory')
class PlaybookNotFound(exception.IronicException):
_msg_fmt = _('Failed to set ansible playbook for action %(action)s')
def _parse_ansible_driver_info(node, action='deploy'):
user = node.driver_info.get('ansible_deploy_username', 'ansible')
key = node.driver_info.get('ansible_deploy_key_file')
playbook = node.driver_info.get('ansible_%s_playbook' % action,
DEFAULT_PLAYBOOKS.get(action))
if not playbook:
raise PlaybookNotFound(action=action)
return playbook, user, key
def _get_configdrive_path(basename):
return os.path.join(CONF.tempdir, basename + '.cndrive')
def _get_node_ip(task):
callback_url = task.node.driver_internal_info.get('agent_url', '')
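# e.g. agent_url 'http://192.0.2.100:9999' (illustrative address, as
# reported by the ramdisk callback) gives '192.0.2.100'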
return urlparse.urlparse(callback_url).netloc.split(':')[0]
def _prepare_extra_vars(host_list, variables=None):
nodes_var = []
for node_uuid, ip, user, extra in host_list:
nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
extra_vars = dict(nodes=nodes_var)
if variables:
extra_vars.update(variables)
return extra_vars
def _run_playbook(name, extra_vars, key, tags=None, notags=None):
"""Execute ansible-playbook."""
playbook = os.path.join(CONF.ansible.playbooks_path, name)
ironic_vars = {'ironic': extra_vars}
args = [CONF.ansible.ansible_playbook_script, playbook,
'-i', INVENTORY_FILE,
'-e', json.dumps(ironic_vars),
]
if CONF.ansible.config_file_path:
env = ['env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path]
args = env + args
if tags:
args.append('--tags=%s' % ','.join(tags))
if notags:
args.append('--skip-tags=%s' % ','.join(notags))
if key:
args.append('--private-key=%s' % key)
verbosity = CONF.ansible.verbosity
if verbosity is None and CONF.debug:
verbosity = 4
if verbosity:
args.append('-' + 'v' * verbosity)
if CONF.ansible.ansible_extra_args:
args.extend(shlex.split(CONF.ansible.ansible_extra_args))
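# The assembled invocation looks roughly like (illustrative):
# env ANSIBLE_CONFIG=<config_file_path> ansible-playbook <playbook>
#     -i <inventory> -e '{"ironic": {...}}' --private-key=<key> -vvvv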
try:
out, err = utils.execute(*args)
return out, err
except processutils.ProcessExecutionError as e:
raise exception.InstanceDeployFailure(reason=e)
def _calculate_memory_req(task):
image_source = task.node.instance_info['image_source']
image_size = images.download_size(task.context, image_source)
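# e.g. (assumed figures) a 2 GiB image requires
# 2048 MiB + CONF.ansible.extra_memory (10 MiB by default) = 2058 MiB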
return image_size // units.Mi + CONF.ansible.extra_memory
def _parse_partitioning_info(node):
info = node.instance_info
i_info = {'label': deploy_utils.get_disk_label(node) or 'msdos'}
is_gpt = i_info['label'] == 'gpt'
unit = 'MiB'
partitions = {}
def add_partition(name, start, end):
partitions[name] = {'number': len(partitions) + 1,
'part_start': '%i%s' % (start, unit),
'part_end': '%i%s' % (end, unit)}
if is_gpt:
partitions[name]['name'] = name
end = 1
if is_gpt:
# prepend 1MiB bios_grub partition for GPT so that grub(2) can be installed
start, end = end, end + 1
add_partition('bios', start, end)
partitions['bios']['flags'] = ['bios_grub']
ephemeral_mb = info['ephemeral_mb']
if ephemeral_mb:
start, end = end, end + ephemeral_mb
add_partition('ephemeral', start, end)
i_info['ephemeral_format'] = info['ephemeral_format']
i_info['preserve_ephemeral'] = (
'yes' if info['preserve_ephemeral'] else 'no')
swap_mb = info['swap_mb']
if swap_mb:
start, end = end, end + swap_mb
add_partition('swap', start, end)
configdrive = info.get('configdrive')
if configdrive:
# pre-create 64MiB partition for configdrive
start, end = end, end + 64
add_partition('configdrive', start, end)
# NOTE(pas-ha) make the root partition last so that
# e.g. cloud-init can grow it on first start
start, end = end, end + info['root_mb']
add_partition('root', start, end)
if not is_gpt:
partitions['root']['flags'] = ['boot']
i_info['partitions'] = partitions
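# e.g. a hypothetical msdos-labeled node with only root_mb=5120 yields
# {'partition_info': {'label': 'msdos',
#                     'partitions': {'root': {'number': 1,
#                                             'part_start': '1MiB',
#                                             'part_end': '5121MiB',
#                                             'flags': ['boot']}}}}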
return {'partition_info': i_info}
def _parse_root_device_hints(node):
"""Convert string with hints to dict. """
root_device = node.properties.get('root_device')
if not root_device:
return {}
try:
parsed_hints = irlib_utils.parse_root_device_hints(root_device)
except ValueError as e:
raise exception.InvalidParameterValue(
_('Failed to validate the root device hints for node %(node)s. '
'Error: %(error)s') % {'node': node.uuid, 'error': e})
root_device_hints = {}
advanced = {}
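# ironic-lib normalizes hints into operator-prefixed strings, e.g.
# {'size': '== 100', 'serial': 's== abc%20def'}; the loop below
# recovers plain values ({'size': 100, 'serial': 'abc def'}) and
# rejects any other (advanced) operators.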
for hint, value in parsed_hints.items():
if isinstance(value, six.string_types):
if value.startswith('== '):
root_device_hints[hint] = int(value[3:])
elif value.startswith('s== '):
root_device_hints[hint] = urlparse.unquote(value[4:])
else:
advanced[hint] = value
else:
root_device_hints[hint] = value
if advanced:
raise exception.InvalidParameterValue(
_('Ansible-deploy does not support advanced root device hints '
'based on oslo.utils operators. '
'Present advanced hints for node %(node)s are %(hints)s.') % {
'node': node.uuid, 'hints': advanced})
return root_device_hints
def _add_ssl_image_options(image):
image['validate_certs'] = ('no' if CONF.ansible.image_store_insecure
else 'yes')
if CONF.ansible.image_store_cafile:
image['cafile'] = CONF.ansible.image_store_cafile
if CONF.ansible.image_store_certfile and CONF.ansible.image_store_keyfile:
image['client_cert'] = CONF.ansible.image_store_certfile
image['client_key'] = CONF.ansible.image_store_keyfile
def _prepare_variables(task):
node = task.node
i_info = node.instance_info
image = {}
for i_key, i_value in i_info.items():
if i_key.startswith('image_'):
image[i_key[6:]] = i_value
image['mem_req'] = _calculate_memory_req(task)
checksum = image.get('checksum')
if checksum:
# NOTE(pas-ha) checksum can be in <algo>:<checksum> format
# as supported by various Ansible modules, mostly good for
# standalone Ironic case when instance_info is populated manually.
# With no <algo> we assume that instance_info was populated from Glance,
# where the API always reports the checksum as MD5.
if ':' not in checksum:
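# e.g. a bare 'abc123' from Glance becomes 'md5:abc123'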
image['checksum'] = 'md5:%s' % checksum
_add_ssl_image_options(image)
variables = {'image': image}
configdrive = i_info.get('configdrive')
if configdrive:
if urlparse.urlparse(configdrive).scheme in ('http', 'https'):
cfgdrv_type = 'url'
cfgdrv_location = configdrive
else:
cfgdrv_location = _get_configdrive_path(node.uuid)
with open(cfgdrv_location, 'w') as f:
f.write(configdrive)
cfgdrv_type = 'file'
variables['configdrive'] = {'type': cfgdrv_type,
'location': cfgdrv_location}
root_device_hints = _parse_root_device_hints(node)
if root_device_hints:
variables['root_device_hints'] = root_device_hints
return variables
def _validate_clean_steps(steps, node_uuid):
missing = []
for step in steps:
name = step.get('name')
if not name:
missing.append({'name': 'undefined', 'field': 'name'})
continue
if 'interface' not in step:
missing.append({'name': name, 'field': 'interface'})
args = step.get('args', {})
for arg_name, arg in args.items():
if arg.get('required', False) and 'value' not in arg:
missing.append({'name': name,
'field': '%s.value' % arg_name})
if missing:
err_string = ', '.join(
'name %(name)s, field %(field)s' % i for i in missing)
msg = _("Malformed clean_steps file: %s") % err_string
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
if len(set(s['name'] for s in steps)) != len(steps):
msg = _("Cleaning steps do not have unique names.")
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
def _get_clean_steps(node, interface=None, override_priorities=None):
"""Get cleaning steps."""
clean_steps_file = node.driver_info.get('ansible_clean_steps_config',
DEFAULT_CLEAN_STEPS)
path = os.path.join(CONF.ansible.playbooks_path, clean_steps_file)
try:
with open(path) as f:
internal_steps = yaml.safe_load(f)
except Exception as e:
msg = _('Failed to load clean steps from file '
'%(file)s: %(exc)s') % {'file': path, 'exc': e}
raise exception.NodeCleaningFailure(node=node.uuid, reason=msg)
_validate_clean_steps(internal_steps, node.uuid)
steps = []
override = override_priorities or {}
for params in internal_steps:
name = params['name']
clean_if = params['interface']
if interface is not None and interface != clean_if:
continue
new_priority = override.get(name)
priority = (new_priority if new_priority is not None else
params.get('priority', 0))
args = {}
argsinfo = params.get('args', {})
for arg, arg_info in argsinfo.items():
args[arg] = arg_info.pop('value', None)
step = {
'interface': clean_if,
'step': name,
'priority': priority,
'abortable': False,
'argsinfo': argsinfo,
'args': args
}
steps.append(step)
return steps
class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
"""Interface for deploy-related actions."""
def __init__(self):
super(AnsibleDeploy, self).__init__()
# NOTE(pas-ha) overriding agent creation as we won't be
# communicating with it, only processing heartbeats
self._client = None
def get_properties(self):
"""Return the properties of the interface."""
props = COMMON_PROPERTIES.copy()
# NOTE(pas-ha) this is to get the deploy_forces_oob_reboot property
props.update(agent_base.VENDOR_PROPERTIES)
return props
@METRICS.timer('AnsibleDeploy.validate')
def validate(self, task):
"""Validate the driver-specific Node deployment info."""
task.driver.boot.validate(task)
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
raise exception.InvalidParameterValue(_(
"Node %(node)s is configured to use the %(driver)s driver "
"which does not support netboot.") % {'node': node.uuid,
'driver': node.driver})
params = {}
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
'parameters were missing') % node.uuid
deploy_utils.check_for_missing_params(params, error_msg)
# validate root device hints, proper exceptions are raised from there
_parse_root_device_hints(node)
def _ansible_deploy(self, task, node_address):
"""Internal function for deployment to a node."""
node = task.node
LOG.debug('IP of node %(node)s is %(ip)s',
{'node': node.uuid, 'ip': node_address})
variables = _prepare_variables(task)
if not node.driver_internal_info.get('is_whole_disk_image'):
variables.update(_parse_partitioning_info(task.node))
playbook, user, key = _parse_ansible_driver_info(task.node)
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list, variables=variables)
LOG.debug('Starting deploy on node %s', node.uuid)
# any caller should manage exceptions raised from here
_run_playbook(playbook, extra_vars, key)
@METRICS.timer('AnsibleDeploy.deploy')
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Perform a deployment to a node."""
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
@METRICS.timer('AnsibleDeploy.tear_down')
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node."""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.unconfigure_tenant_networks(task)
return states.DELETED
@METRICS.timer('AnsibleDeploy.prepare')
def prepare(self, task):
"""Prepare the deployment environment for this node."""
node = task.node
# TODO(pas-ha) investigate takeover scenario
if node.provision_state == states.DEPLOYING:
# adding network-driver dependent provisioning ports
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.add_provisioning_network(task)
if node.provision_state not in [states.ACTIVE, states.ADOPTING]:
node.instance_info = deploy_utils.build_instance_info_for_deploy(
task)
node.save()
boot_opt = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, boot_opt)
@METRICS.timer('AnsibleDeploy.clean_up')
def clean_up(self, task):
"""Clean up the deployment environment for this node."""
task.driver.boot.clean_up_ramdisk(task)
provider = dhcp_factory.DHCPFactory()
provider.clean_dhcp(task)
irlib_utils.unlink_without_raise(
_get_configdrive_path(task.node.uuid))
def take_over(self, task):
LOG.error("Ansible deploy does not support take over. "
"You must redeploy the node %s explicitly.",
task.node.uuid)
def get_clean_steps(self, task):
"""Get the list of clean steps from the file.
:param task: a TaskManager object containing the node
:returns: A list of clean step dictionaries
"""
new_priorities = {
'erase_devices': CONF.deploy.erase_devices_priority,
'erase_devices_metadata':
CONF.deploy.erase_devices_metadata_priority
}
return _get_clean_steps(task.node, interface='deploy',
override_priorities=new_priorities)
@METRICS.timer('AnsibleDeploy.execute_clean_step')
def execute_clean_step(self, task, step):
"""Execute a clean step.
:param task: a TaskManager object containing the node
:param step: a clean step dictionary to execute
:returns: None
"""
node = task.node
playbook, user, key = _parse_ansible_driver_info(
task.node, action='clean')
stepname = step['step']
node_address = _get_node_ip(task)
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list)
LOG.debug('Starting cleaning step %(step)s on node %(node)s',
{'node': node.uuid, 'step': stepname})
step_tags = step['args'].get('tags', [])
try:
_run_playbook(playbook, extra_vars, key,
tags=step_tags)
except exception.InstanceDeployFailure as e:
LOG.error("Ansible failed cleaning step %(step)s "
"on node %(node)s.",
{'node': node.uuid, 'step': stepname})
manager_utils.cleaning_error_handler(task, six.text_type(e))
else:
LOG.info('Ansible completed cleaning step %(step)s '
'on node %(node)s.',
{'node': node.uuid, 'step': stepname})
@METRICS.timer('AnsibleDeploy.prepare_cleaning')
def prepare_cleaning(self, task):
"""Boot into the ramdisk to prepare for cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the previous cleaning ports cannot
be removed or if new cleaning ports cannot be created
:returns: None or states.CLEANWAIT for async prepare.
"""
node = task.node
manager_utils.set_node_cleaning_steps(task)
if not node.driver_internal_info['clean_steps']:
# no clean steps configured, nothing to do.
return
task.driver.network.add_cleaning_network(task)
boot_opt = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, boot_opt)
manager_utils.node_power_action(task, states.REBOOT)
return states.CLEANWAIT
@METRICS.timer('AnsibleDeploy.tear_down_cleaning')
def tear_down_cleaning(self, task):
"""Clean up the PXE and DHCP files after cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the cleaning ports cannot be
removed
"""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.boot.clean_up_ramdisk(task)
task.driver.network.remove_cleaning_network(task)
@METRICS.timer('AnsibleDeploy.continue_deploy')
def continue_deploy(self, task):
# NOTE(pas-ha) the lock should be already upgraded in heartbeat,
# just setting its purpose for better logging
task.upgrade_lock(purpose='deploy')
task.process_event('resume')
# NOTE(pas-ha) this method is called from heartbeat processing only,
# so we are sure we need this particular method, not the general one
node_address = _get_node_ip(task)
self._ansible_deploy(task, node_address)
self.reboot_to_instance(task)
@METRICS.timer('AnsibleDeploy.reboot_to_instance')
def reboot_to_instance(self, task):
node = task.node
LOG.info('Ansible completed deploy on node %s', node.uuid)
LOG.debug('Rebooting node %s to instance', node.uuid)
manager_utils.node_set_boot_device(task, 'disk', persistent=True)
self.reboot_and_finish_deploy(task)
task.driver.boot.clean_up_ramdisk(task)
@METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy')
def reboot_and_finish_deploy(self, task):
wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
@retrying.retry(
stop_max_attempt_number=attempts,
retry_on_result=lambda state: state != states.POWER_OFF,
wait_fixed=wait
)
def _wait_until_powered_off(task):
return task.driver.power.get_power_state(task)
node = task.node
oob_power_off = strutils.bool_from_string(
node.driver_info.get('deploy_forces_oob_reboot', False))
try:
if not oob_power_off:
try:
node_address = _get_node_ip(task)
playbook, user, key = _parse_ansible_driver_info(
node, action='shutdown')
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list)
_run_playbook(playbook, extra_vars, key)
_wait_until_powered_off(task)
except Exception as e:
LOG.warning('Failed to soft power off node %(node_uuid)s '
'in at least %(timeout)d seconds. '
'Error: %(error)s',
{'node_uuid': node.uuid,
'timeout': (wait * (attempts - 1)) / 1000,
'error': e})
# NOTE(pas-ha) flush is a part of deploy playbook
# so if it finished successfully we can safely
# power off the node out-of-band
manager_utils.node_power_action(task, states.POWER_OFF)
else:
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.remove_provisioning_network(task)
task.driver.network.configure_tenant_networks(task)
manager_utils.node_power_action(task, states.POWER_ON)
except Exception as e:
msg = (_('Error rebooting node %(node)s after deploy. '
'Error: %(error)s') %
{'node': node.uuid, 'error': e})
agent_base.log_and_raise_deployment_error(task, msg)
task.process_event('done')
LOG.info('Deployment to node %s done', task.node.uuid)

View File

@@ -0,0 +1,11 @@
- hosts: conductor
gather_facts: no
tasks:
- add_host:
group: ironic
hostname: "{{ item.name }}"
ansible_host: "{{ item.ip }}"
ansible_user: "{{ item.user }}"
ironic_extra: "{{ item.extra | default({}) }}"
with_items: "{{ ironic.nodes }}"
tags: always
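# The play consumes the extra vars passed by the deploy interface via
# "-e", shaped like (hypothetical node):
# ironic:
#   nodes:
#     - name: <node uuid>
#       ip: 192.0.2.100
#       user: ansible
#       extra: {}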

View File

@@ -0,0 +1,35 @@
[defaults]
# retries through the ansible-deploy driver are not supported
retry_files_enabled = False
# this uses the supplied callback plugin to interleave ansible event logs
# into the ironic-conductor log as set in the ironic configuration file,
# see callback_plugin/ironic_log.ini for some options to set
# (DevStack _needs_ some tweaks)
callback_whitelist = ironic_log
# For better security, bake SSH host keys into bootstrap image,
# add those to ~/.ssh/known_hosts for user running ironic-conductor service
# on all nodes where ironic-conductor and ansible-deploy driver are installed,
# and set the host_key_checking to True (or comment it out, it is the default)
host_key_checking = False
# uncomment if you have problems with the ramdisk locale on ansible >= 2.1
#module_set_locale=False
# This sets the interval (in seconds) of Ansible internal processes polling
# each other. Lower values improve performance with large playbooks at
# the expense of extra CPU load. Higher values are more suitable for Ansible
# usage in automation scenarios, when UI responsiveness is not required but
# CPU usage might be a concern.
# Default corresponds to the value hardcoded in Ansible ≤ 2.1:
#internal_poll_interval = 0.001
[ssh_connection]
# pipelining greatly increases the speed of deployment; disable it only
# when the ssh client on the ironic node or the server in the bootstrap
# image does not support it, or if you cannot disable "requiretty" for
# the passwordless sudoer user in the bootstrap image.
# See Ansible documentation for more info:
# http://docs.ansible.com/ansible/intro_configuration.html#pipelining
pipelining = True

View File

@@ -0,0 +1,15 @@
[ironic]
# If Ironic's config is not in one of default oslo_config locations,
# specify the path to it here
#config_file =
# Force usage of journald
#use_journal = True
# Force usage of syslog
#use_syslog = False
# Force usage of given file to log to.
# Useful for a testing system with only stderr logging
# (e.g. DevStack deployed w/o systemd)
#log_file =

View File

@@ -0,0 +1,148 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import pbr.version
CONF = cfg.CONF
DOMAIN = 'ironic'
VERSION = pbr.version.VersionInfo(DOMAIN).release_string()
# find and parse callback config file
def parse_callback_config():
basename = os.path.splitext(__file__)[0]
config = ConfigParser.ConfigParser()
callback_config = {'ironic_config': None,
'ironic_log_file': None,
'use_journal': True,
'use_syslog': False}
try:
config.readfp(open(basename + ".ini"))
if config.has_option('ironic', 'config_file'):
callback_config['ironic_config'] = config.get(
'ironic', 'config_file')
if config.has_option('ironic', 'log_file'):
callback_config['ironic_log_file'] = config.get(
'ironic', 'log_file')
if config.has_option('ironic', 'use_journal'):
callback_config['use_journal'] = strutils.bool_from_string(
config.get('ironic', 'use_journal'))
if config.has_option('ironic', 'use_syslog'):
callback_config['use_syslog'] = strutils.bool_from_string(
config.get('ironic', 'use_syslog'))
except Exception:
pass
return callback_config
def setup_log():
logging.register_options(CONF)
conf_kwargs = dict(args=[], project=DOMAIN, version=VERSION)
callback_config = parse_callback_config()
if callback_config['ironic_config']:
conf_kwargs['default_config_files'] = [
callback_config['ironic_config']]
CONF(**conf_kwargs)
if callback_config['use_journal']:
CONF.set_override('use_journal', True)
if callback_config['use_syslog']:
CONF.set_override('use_syslog', True)
if callback_config['ironic_log_file']:
CONF.set_override("log_file", callback_config['ironic_log_file'])
logging.setup(CONF, DOMAIN)
class CallbackModule(object):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'ironic_log'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
setup_log()
self.log = logging.getLogger(__name__)
self.node = None
self.opts = {}
# NOTE(pas-ha) this method is required for Ansible>=2.4
# TODO(pas-ha) rewrite to support defining callback plugin options
# in ansible.cfg after we require Ansible >=2.4
def set_options(self, options):
self.opts = options
def runner_msg_dict(self, result):
self.node = result._host.get_name()
name = result._task.get_name()
res = str(result._result)
return dict(node=self.node, name=name, res=res)
def v2_playbook_on_task_start(self, task, is_conditional):
# NOTE(pas-ha) I do not know (yet) how to obtain a ref to host
# until first task is processed
node = self.node or "Node"
name = task.get_name()
if name == 'setup':
self.log.debug("Processing task %(name)s.", dict(name=name))
else:
self.log.debug("Processing task %(name)s on node %(node)s.",
dict(name=name, node=node))
def v2_runner_on_failed(self, result, *args, **kwargs):
self.log.error(
"Ansible task %(name)s failed on node %(node)s: %(res)s",
self.runner_msg_dict(result))
def v2_runner_on_ok(self, result):
msg_dict = self.runner_msg_dict(result)
if msg_dict['name'] == 'setup':
self.log.info("Ansible task 'setup' complete on node %(node)s",
msg_dict)
else:
self.log.info("Ansible task %(name)s complete on node %(node)s: "
"%(res)s", msg_dict)
def v2_runner_on_unreachable(self, result):
self.log.error(
"Node %(node)s was unreachable for Ansible task %(name)s: %(res)s",
self.runner_msg_dict(result))
def v2_runner_on_async_poll(self, result):
self.log.debug("Polled ansible task %(name)s for complete "
"on node %(node)s: %(res)s",
self.runner_msg_dict(result))
def v2_runner_on_async_ok(self, result):
self.log.info("Async Ansible task %(name)s complete on node %(node)s: "
"%(res)s", self.runner_msg_dict(result))
def v2_runner_on_async_failed(self, result):
self.log.error("Async Ansible task %(name)s failed on node %(node)s: "
"%(res)s", self.runner_msg_dict(result))
def v2_runner_on_skipped(self, result):
self.log.debug(
"Ansible task %(name)s skipped on node %(node)s: %(res)s",
self.runner_msg_dict(result))

View File

@@ -0,0 +1,6 @@
---
- import_playbook: add-ironic-nodes.yaml
- hosts: ironic
roles:
- clean

View File

@@ -0,0 +1,19 @@
- name: erase_devices_metadata
priority: 99
interface: deploy
args:
tags:
required: true
description: list of playbook tags used to erase partition table on disk devices
value:
- zap
- name: erase_devices
priority: 10
interface: deploy
args:
tags:
required: true
description: list of playbook tags used to erase disk devices
value:
- shred

View File

@@ -0,0 +1,12 @@
---
- import_playbook: add-ironic-nodes.yaml
- hosts: ironic
roles:
- discover
- prepare
- deploy
- configure
post_tasks:
- name: flush disk state
command: sync

View File

@@ -0,0 +1 @@
conductor ansible_connection=local

View File

@@ -0,0 +1,64 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
COLLECT_INFO = (('wwn', 'WWN'), ('serial', 'SERIAL_SHORT'),
('wwn_with_extension', 'WWN_WITH_EXTENSION'),
('wwn_vendor_extension', 'WWN_VENDOR_EXTENSION'))
def get_devices_wwn(devices, module):
try:
import pyudev
# NOTE(pas-ha) creating context might fail if udev is missing
context = pyudev.Context()
except ImportError:
module.warn('Cannot collect "wwn", "wwn_with_extension", '
'"wwn_vendor_extension" and "serial" when using '
'root device hints because the pyudev Python '
'bindings are not installed')
return {}
dev_dict = {}
for device in devices:
name = '/dev/' + device
try:
udev = pyudev.Device.from_device_file(context, name)
except (ValueError, EnvironmentError, pyudev.DeviceNotFoundError) as e:
module.warn('Device %(dev)s is inaccessible, skipping... '
'Error: %(error)s' % {'dev': name, 'error': e})
continue
dev_dict[device] = {}
for key, udev_key in COLLECT_INFO:
dev_dict[device][key] = udev.get('ID_%s' % udev_key)
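# the facts returned below look like (illustrative values):
# {'ansible_facts': {'devices_wwn': {
#     'sda': {'wwn': '0x50014ee0042f1566', 'serial': 'WD-WMC4N0E3'}}}}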
return {"ansible_facts": {"devices_wwn": dev_dict}}
def main():
module = AnsibleModule(
argument_spec=dict(
devices=dict(required=True, type='list'),
),
supports_check_mode=True,
)
devices = module.params['devices']
data = get_devices_wwn(devices, module)
module.exit_json(**data)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
GIB = 1 << 30
EXTRA_PARAMS = set(['wwn', 'serial', 'wwn_with_extension',
'wwn_vendor_extension'])
# NOTE: ansible calculates device size as a float with 2-digit precision,
# while Ironic requires the size in GiB; relying on the ansible-reported
# size directly could thus be wrong for devices larger than 1 TB.
def size_gib(device_info):
sectors = device_info.get('sectors')
sectorsize = device_info.get('sectorsize')
if sectors is None or sectorsize is None:
return '0'
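# e.g. 976773168 sectors * 512 bytes per sector // 1 GiB -> '465'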
return str((int(sectors) * int(sectorsize)) // GIB)
def merge_devices_info(devices, devices_wwn):
merged_info = devices.copy()
for device in merged_info:
if device in devices_wwn:
merged_info[device].update(devices_wwn[device])
# replace size
merged_info[device]['size'] = size_gib(merged_info[device])
return merged_info
def root_hint(hints, devices):
hint = None
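# With only a name hint, e.g. {'name': '/dev/sda'}, the first device
# whose '/dev/<device>' path matches is chosen; any other hints must
# all match the collected device facts exactly.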
name = hints.pop('name', None)
for device in devices:
for key in hints:
if hints[key] != devices[device].get(key):
break
else:
# If multiple hints are specified, a device must satisfy all
# the hints
dev_name = '/dev/' + device
if name is None or name == dev_name:
hint = dev_name
break
return hint
def main():
module = AnsibleModule(
argument_spec=dict(
root_device_hints=dict(required=True, type='dict'),
ansible_devices=dict(required=True, type='dict'),
ansible_devices_wwn=dict(required=True, type='dict')
),
supports_check_mode=True)
hints = module.params['root_device_hints']
devices = module.params['ansible_devices']
devices_wwn = module.params['ansible_devices_wwn']
if not devices_wwn:
extra = set(hints) & EXTRA_PARAMS
if extra:
module.fail_json(msg='Extra hints (supported by an additional ansible'
' module) are set but this information cannot be'
' collected. Extra hints: %s' % ', '.join(extra))
devices_info = merge_devices_info(devices, devices_wwn or {})
hint = root_hint(hints, devices_info)
if hint is None:
module.fail_json(msg='Root device hints are set, but none of the '
'devices satisfy them. Collected devices info: %s'
% devices_info)
ret_data = {'ansible_facts': {'ironic_root_device': hint}}
module.exit_json(**ret_data)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,118 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import string
import requests
# adapted from IPA
DEFAULT_CHUNK_SIZE = 1024 * 1024 # 1MB
class StreamingDownloader(object):
def __init__(self, url, chunksize, hash_algo=None, verify=True,
certs=None):
if hash_algo is not None:
self.hasher = hashlib.new(hash_algo)
else:
self.hasher = None
self.chunksize = chunksize
resp = requests.get(url, stream=True, verify=verify, cert=certs)
if resp.status_code != 200:
raise Exception('Invalid response code: %s' % resp.status_code)
self._request = resp
def __iter__(self):
for chunk in self._request.iter_content(chunk_size=self.chunksize):
if self.hasher is not None:
self.hasher.update(chunk)
yield chunk
def checksum(self):
if self.hasher is not None:
return self.hasher.hexdigest()
def stream_to_dest(url, dest, chunksize, hash_algo, verify=True, certs=None):
downloader = StreamingDownloader(url, chunksize, hash_algo,
verify=verify, certs=certs)
with open(dest, 'wb+') as f:
for chunk in downloader:
f.write(chunk)
return downloader.checksum()
def main():
module = AnsibleModule(
argument_spec=dict(
url=dict(required=True, type='str'),
dest=dict(required=True, type='str'),
checksum=dict(required=False, type='str', default=''),
chunksize=dict(required=False, type='int',
default=DEFAULT_CHUNK_SIZE),
validate_certs=dict(required=False, type='bool', default=True),
client_cert=dict(required=False, type='str', default=''),
client_key=dict(required=False, type='str', default='')
))
url = module.params['url']
dest = module.params['dest']
checksum = module.params['checksum']
chunksize = module.params['chunksize']
validate = module.params['validate_certs']
client_cert = module.params['client_cert']
client_key = module.params['client_key']
if client_cert:
certs = (client_cert, client_key) if client_key else client_cert
else:
certs = None
if checksum == '':
hash_algo, checksum = None, None
else:
try:
hash_algo, checksum = checksum.rsplit(':', 1)
except ValueError:
module.fail_json(msg='The checksum parameter has to be in format '
'"<algorithm>:<checksum>"')
checksum = checksum.lower()
if not all(c in string.hexdigits for c in checksum):
module.fail_json(msg='The checksum must be a valid hex number')
if hash_algo not in hashlib.algorithms_available:
module.fail_json(msg="%s checksums are not supported" % hash_algo)
try:
actual_checksum = stream_to_dest(
url, dest, chunksize, hash_algo, verify=validate, certs=certs)
except Exception as e:
module.fail_json(msg=str(e))
else:
if hash_algo and actual_checksum != checksum:
module.fail_json(msg='Invalid dest checksum')
else:
module.exit_json(changed=True)
# NOTE(pas-ha) Ansible's module_utils.basic is licensed under BSD (2 clause)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

View File

@@ -0,0 +1 @@
sectors_to_wipe: 1024

View File

@@ -0,0 +1,6 @@
- import_tasks: zap.yaml
tags:
- zap
- import_tasks: shred.yaml
tags:
- shred

View File

@@ -0,0 +1,8 @@
- name: clean block devices
become: yes
command: shred -f -z /dev/{{ item.key }}
async: 3600
poll: 30
with_dict: "{{ ansible_devices }}"
when:
- item.value.host
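# NOTE: 'host' in ansible_devices facts holds the controller description
# and is empty for virtual devices (loop, ram, ...), so those are skipped.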

View File

@@ -0,0 +1,24 @@
- name: store start and end of disk
set_fact:
start_sectors:
- 0
end_sectors:
- "{{ (device.value.sectors | int) - sectors_to_wipe }}"
when:
- device.value.host
- name: update start and end sectors with such for partitions
set_fact:
start_sectors: "{{ start_sectors + [item.value.start | int ] }}"
end_sectors: "{{ end_sectors + [ (item.value.start | int) + ( item.value.sectors | int) - sectors_to_wipe ] }}"
with_dict: "{{ device.value.partitions }}"
when:
- device.value.host
- name: wipe starts and ends of disks and partitions
command: dd if=/dev/zero of=/dev/{{ device.key }} ibs={{ device.value.sectorsize }} obs={{ device.value.sectorsize }} count={{ sectors_to_wipe }} seek={{ item }}
with_flattened:
- "{{ start_sectors | map('int') | list | sort (reverse=True) }}"
- "{{ end_sectors | map('int') | list | sort (reverse=True) }}"
when:
- device.value.host

View File

@@ -0,0 +1,16 @@
# NOTE(pas-ha) this is to ensure that partition metadata that might be
# stored at the start or end of a partition itself also becomes unusable
# and does not interfere with a future partition scheme if new partitions
# happen to fall on the same boundaries where old partitions were.
# NOTE(pas-ha) loop_control works with Ansible >= 2.1
- include_tasks: wipe.yaml
with_dict: "{{ ansible_devices }}"
loop_control:
loop_var: device
- name: wipe general partition table metadata
become: yes
command: sgdisk -Z /dev/{{ item.key }}
with_dict: "{{ ansible_devices }}"
when:
- item.value.host

View File

@@ -0,0 +1 @@
tmp_rootfs_mount: /tmp/rootfs

View File

@@ -0,0 +1,79 @@
- name: discover grub-install command
find:
paths:
- "{{ tmp_rootfs_mount }}/usr/sbin"
pattern: "grub*-install"
register: grub_install_found
- name: discover grub-mkconfig command
find:
paths:
- "{{ tmp_rootfs_mount }}/usr/sbin"
pattern: "grub*-mkconfig"
register: grub_config_found
- name: find grub config file
find:
paths:
- "{{ tmp_rootfs_mount }}/boot"
pattern: "grub*.cfg"
recurse: yes
register: grub_file_found
- name: test if all needed grub files were found
assert:
that:
- "{{ grub_install_found.matched > 0 }}"
- "{{ grub_config_found.matched > 0 }}"
- "{{ grub_file_found.matched > 0 }}"
- name: set paths to grub commands
set_fact:
grub_install_cmd: "{{ grub_install_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
grub_config_cmd: "{{ grub_config_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
grub_config_file: "{{ grub_file_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
- name: make dirs for chroot
become: yes
file:
state: directory
path: "{{ tmp_rootfs_mount }}/{{ item }}"
with_items:
- dev
- sys
- proc
- name: mount dirs for chroot
become: yes
command: mount -o bind /{{ item }} {{ tmp_rootfs_mount }}/{{ item }}
with_items:
- dev
- sys
- proc
- block:
- name: get grub version string
become: yes
command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_install_cmd }} --version'
register: grub_version_string
- name: install grub to disk
become: yes
command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_install_cmd }} {{ ironic_root_device }}'
- name: preload lvm modules for grub2
become: yes
lineinfile:
dest: "{{ tmp_rootfs_mount }}/etc/default/grub"
state: present
line: GRUB_PRELOAD_MODULES=lvm
when: grub_version_string.stdout.split() | last | first == '2'
- name: create grub config
become: yes
command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_config_cmd }} -o {{ grub_config_file }}'
always:
- name: unmount dirs for chroot
become: yes
command: umount {{ tmp_rootfs_mount }}/{{ item }}
with_items:
- dev
- sys
- proc

View File

@@ -0,0 +1,4 @@
- import_tasks: mounts.yaml
when: ironic.image.type | default('whole-disk-image') == 'partition'
- import_tasks: grub.yaml
when: ironic.image.type | default('whole-disk-image') == 'partition'

View File

@@ -0,0 +1,8 @@
- name: create tmp mount point for root
file:
state: directory
path: "{{ tmp_rootfs_mount }}"
- name: mount user image root
become: yes
command: mount {{ ironic_image_target }} {{ tmp_rootfs_mount }}

View File

@@ -0,0 +1,110 @@
#!/bin/sh
# Copyright 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(pas-ha) this is mostly copied over from Ironic Python Agent
# compared to the original file in IPA,
# TODO(pas-ha) rewrite this shell script to be a proper Ansible module
# This should work with almost any image that uses MBR partitioning and
# doesn't already have 3 or more partitions -- or else you'll no longer
# be able to create extended partitions on the disk.
# Takes one argument - block device
log() {
echo "`basename $0`: $@"
}
fail() {
log "Error: $@"
exit 1
}
MAX_DISK_PARTITIONS=128
MAX_MBR_SIZE_MB=2097152
DEVICE="$1"
[ -b $DEVICE ] || fail "(DEVICE) $DEVICE is not a block device"
# We need to run partx -u to ensure all partitions are visible so the
# following blkid command returns partitions just imaged to the device
partx -u $DEVICE || fail "running partx -u $DEVICE"
# todo(jayf): partx -u doesn't work in all cases, but partprobe fails in
# devstack. We run both commands now as a temporary workaround for bug 1433812
# long term, this should all be refactored into python and share code with
# the other partition-modifying code in the agent.
partprobe $DEVICE || true
# Check for preexisting partition for configdrive
EXISTING_PARTITION=`/sbin/blkid -l -o device $DEVICE -t LABEL=config-2`
if [ -z $EXISTING_PARTITION ]; then
# Check if it is GPT partition and needs to be re-sized
if [ `partprobe $DEVICE print 2>&1 | grep "fix the GPT to use all of the space"` ]; then
log "Fixing GPT to use all of the space on device $DEVICE"
sgdisk -e $DEVICE || fail "move backup GPT data structures to the end of ${DEVICE}"
# Need to create new partition for config drive
# Not all images have partition numbers in sequence; there can be holes.
# These holes get filled up when a new partition is created.
TEMP_DIR="$(mktemp -d)"
EXISTING_PARTITION_LIST=$TEMP_DIR/existing_partitions
UPDATED_PARTITION_LIST=$TEMP_DIR/updated_partitions
gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $EXISTING_PARTITION_LIST
# Create small partition at the end of the device
log "Adding configdrive partition to $DEVICE"
sgdisk -n 0:-64MB:0 $DEVICE || fail "creating configdrive on ${DEVICE}"
gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $UPDATED_PARTITION_LIST
CONFIG_PARTITION_ID=`diff $EXISTING_PARTITION_LIST $UPDATED_PARTITION_LIST | tail -n1 |awk '{print $2}'`
ISO_PARTITION="${DEVICE}${CONFIG_PARTITION_ID}"
else
log "Working on MBR only device $DEVICE"
# get total disk size, to detect if that exceeds 2TB msdos limit
disksize_bytes=$(blockdev --getsize64 $DEVICE)
disksize_mb=$(( ${disksize_bytes%% *} / 1024 / 1024))
startlimit=-64MiB
endlimit=-0
if [ "$disksize_mb" -gt "$MAX_MBR_SIZE_MB" ]; then
# Create small partition at 2TB limit
startlimit=$(($MAX_MBR_SIZE_MB - 65))
endlimit=$(($MAX_MBR_SIZE_MB - 1))
fi
log "Adding configdrive partition to $DEVICE"
parted -a optimal -s -- $DEVICE mkpart primary fat32 $startlimit $endlimit || fail "creating configdrive on ${DEVICE}"
# Find partition we just created
# Dump all partitions, ignore empty ones, then get the last partition ID
ISO_PARTITION=`sfdisk --dump $DEVICE | grep -v ' 0,' | tail -n1 | awk -F ':' '{print $1}' | sed -e 's/\s*$//'` || fail "finding ISO partition created on ${DEVICE}"
# Wait for udev to pick up the partition
udevadm settle --exit-if-exists=$ISO_PARTITION
fi
else
log "Existing configdrive found on ${DEVICE} at ${EXISTING_PARTITION}"
ISO_PARTITION=$EXISTING_PARTITION
fi
# Output the created/discovered partition for configdrive
echo "configdrive $ISO_PARTITION"

View File

@@ -0,0 +1,44 @@
- name: download configdrive data
get_url:
url: "{{ ironic.configdrive.location }}"
dest: /tmp/{{ inventory_hostname }}.gz.base64
validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
async: 600
poll: 15
when: ironic.configdrive.type|default('') == 'url'
- block:
- name: copy configdrive file to node
copy:
src: "{{ ironic.configdrive.location }}"
dest: /tmp/{{ inventory_hostname }}.gz.base64
- name: remove configdrive from conductor
delegate_to: conductor
file:
path: "{{ ironic.configdrive.location }}"
state: absent
when: ironic.configdrive.type|default('') == 'file'
- name: unpack configdrive
shell: cat /tmp/{{ inventory_hostname }}.gz.base64 | base64 --decode | gunzip > /tmp/{{ inventory_hostname }}.cndrive
- block:
- name: prepare config drive partition
become: yes
script: partition_configdrive.sh {{ ironic_root_device }}
register: configdrive_partition_output
- name: test the output of configdrive partitioner
assert:
that:
- "{{ (configdrive_partition_output.stdout_lines | last).split() | length == 2 }}"
- "{{ (configdrive_partition_output.stdout_lines | last).split() | first == 'configdrive' }}"
- name: store configdrive partition
set_fact:
ironic_configdrive_target: "{{ (configdrive_partition_output.stdout_lines | last).split() | last }}"
when: ironic_configdrive_target is undefined
- name: write configdrive
become: yes
command: dd if=/tmp/{{ inventory_hostname }}.cndrive of={{ ironic_configdrive_target }} bs=64K oflag=direct

View File

@@ -0,0 +1,13 @@
- name: check that downloaded image will fit into memory
assert:
that: "{{ ansible_memfree_mb }} >= {{ ironic.image.mem_req }}"
msg: "The image size is too big, no free memory available"
- name: download image with checksum validation
get_url:
url: "{{ ironic.image.url }}"
dest: /tmp/{{ inventory_hostname }}.img
checksum: "{{ ironic.image.checksum|default(omit) }}"
validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
async: 600
poll: 15

View File

@@ -0,0 +1,7 @@
- import_tasks: download.yaml
when: ironic.image.disk_format != 'raw'
- import_tasks: write.yaml
- import_tasks: configdrive.yaml
when: ironic.configdrive is defined

View File

@@ -0,0 +1,20 @@
- name: convert and write
become: yes
command: qemu-img convert -t directsync -O host_device /tmp/{{ inventory_hostname }}.img {{ ironic_image_target }}
async: 1200
poll: 10
when: ironic.image.disk_format != 'raw'
- name: stream to target
become: yes
stream_url:
url: "{{ ironic.image.url }}"
dest: "{{ ironic_image_target }}"
checksum: "{{ ironic.image.checksum|default(omit) }}"
validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
async: 600
poll: 15
when: ironic.image.disk_format == 'raw'
- name: flush
command: sync

View File

@@ -0,0 +1,13 @@
- import_tasks: roothints.yaml
when: ironic.root_device_hints is defined
- set_fact:
ironic_root_device: /dev/{{ item.key }}
with_dict: "{{ ansible_devices }}"
when:
- ironic_root_device is undefined
- item.value.host
- set_fact:
ironic_image_target: "{{ ironic_root_device }}"
when: ironic_image_target is undefined

View File

@@ -0,0 +1,9 @@
- name: get devices wwn facts
facts_wwn:
devices: "{{ ansible_devices.keys() }}"
- name: calculate root hint
root_hints:
root_device_hints: "{{ ironic.root_device_hints }}"
ansible_devices: "{{ ansible_devices }}"
ansible_devices_wwn: "{{ devices_wwn | default({}) }}"

View File

@@ -0,0 +1,2 @@
- import_tasks: parted.yaml
when: ironic.image.type | default('whole-disk-image') == 'partition'

View File

@@ -0,0 +1,45 @@
# this is to handle no autocleaning in ironic
- name: erase partition table
become: yes
command: dd if=/dev/zero of={{ ironic_root_device }} bs=512 count=36
when: not ironic.partition_info.preserve_ephemeral|default('no')|bool
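# (36 sectors cover the MBR plus the primary GPT header and partition
# entries, which occupy LBAs 0-33)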
- name: run parted
become: yes
parted:
device: "{{ ironic_root_device }}"
label: "{{ ironic.partition_info.label }}"
state: "{{ item.1.state | default('present') }}"
name: "{{ item.1.name | default(omit) }}"
number: "{{ item.1.number }}"
part_type: "{{ item.1.part_type | default(omit) }}"
part_start: "{{ item.1.part_start }}"
part_end: "{{ item.1.part_end }}"
flags: "{{ item.1.flags | default(omit) }}"
align: "{{ item.1.align | default(omit) }}"
unit: "{{ item.1.unit | default(omit) }}"
with_items:
- "{{ ironic.partition_info.partitions.items() | sort(attribute='1.number') }}"
- name: reset image target to root partition
set_fact:
ironic_image_target: "{{ ironic_root_device }}{{ ironic.partition_info.partitions.root.number }}"
- name: make swap
become: yes
command: mkswap -L swap1 "{{ ironic_root_device }}{{ ironic.partition_info.partitions.swap.number }}"
when: ironic.partition_info.partitions.swap is defined
- name: format ephemeral partition
become: yes
filesystem:
dev: "{{ ironic_root_device }}{{ ironic.partition_info.partitions.ephemeral.number }}"
fstype: "{{ ironic.partition_info.ephemeral_format }}"
force: yes
opts: "-L ephemeral0"
when: ironic.partition_info.partitions.ephemeral is defined and not ironic.partition_info.preserve_ephemeral|default('no')|bool
- name: save block device for configdrive if partition was created
set_fact:
ironic_configdrive_target: "{{ ironic_root_device }}{{ ironic.partition_info.partitions.configdrive.number }}"
when: ironic.partition_info.partitions.configdrive is defined


@ -0,0 +1,6 @@
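# fire-and-forget: async 1 / poll 0 detaches the task, and "sleep 5" gives
# Ansible time to close the SSH connection before the node powers off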
- name: soft power off
become: yes
shell: sleep 5 && poweroff
async: 1
poll: 0
ignore_errors: true


@ -0,0 +1,6 @@
---
- import_playbook: add-ironic-nodes.yaml
- hosts: ironic
roles:
- shutdown


@ -0,0 +1,870 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironic_lib import utils as irlib_utils
import mock
from oslo_concurrency import processutils
import six
from ironic.common import exception
from ironic.common import states
from ironic.common import utils as com_utils
from ironic.conductor import task_manager
from ironic.conductor import utils
from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
INSTANCE_INFO = {
'image_source': 'fake-image',
'image_url': 'http://image',
'image_checksum': 'checksum',
'image_disk_format': 'qcow2',
'root_mb': 5120,
'swap_mb': 0,
'ephemeral_mb': 0
}
DRIVER_INFO = {
'deploy_kernel': 'glance://deploy_kernel_uuid',
'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
'ansible_deploy_username': 'test',
'ansible_deploy_key_file': '/path/key',
'ipmi_address': '127.0.0.1',
}
DRIVER_INTERNAL_INFO = {
'is_whole_disk_image': True,
'clean_steps': []
}
class AnsibleDeployTestCaseBase(db_base.DbTestCase):
def setUp(self):
super(AnsibleDeployTestCaseBase, self).setUp()
self.config(enabled_deploy_interfaces='direct,iscsi,ansible')
mgr_utils.mock_the_extension_manager(driver='ipmi',
namespace='ironic.hardware.types')
node = {
'driver': 'ipmi',
'deploy_interface': 'ansible',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **node)
class TestAnsibleMethods(AnsibleDeployTestCaseBase):
def test__parse_ansible_driver_info(self):
playbook, user, key = ansible_deploy._parse_ansible_driver_info(
self.node, 'deploy')
self.assertEqual(ansible_deploy.DEFAULT_PLAYBOOKS['deploy'], playbook)
self.assertEqual('test', user)
self.assertEqual('/path/key', key)
def test__parse_ansible_driver_info_no_playbook(self):
self.assertRaises(exception.IronicException,
ansible_deploy._parse_ansible_driver_info,
self.node, 'test')
def test__get_node_ip(self):
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://1.2.3.4:5678'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual('1.2.3.4',
ansible_deploy._get_node_ip(task))
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(group='ansible', verbosity=3)
self.config(group='ansible', ansible_extra_args='--timeout=100')
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key',
tags=['spam'], notags=['ham'])
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
'--tags=spam', '--skip-tags=ham',
'--private-key=/path/to/key', '-vvv', '--timeout=100')
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook_default_verbosity_nodebug(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(debug=False)
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key')
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
'--private-key=/path/to/key')
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook_default_verbosity_debug(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(debug=True)
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key')
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
'--private-key=/path/to/key', '-vvvv')
@mock.patch.object(com_utils, 'execute',
side_effect=processutils.ProcessExecutionError(
description='VIKINGS!'),
autospec=True)
def test__run_playbook_fail(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(debug=False)
extra_vars = {'foo': 'bar'}
exc = self.assertRaises(exception.InstanceDeployFailure,
ansible_deploy._run_playbook,
'deploy', extra_vars, '/path/to/key')
self.assertIn('VIKINGS!', six.text_type(exc))
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
'--private-key=/path/to/key')
def test__parse_partitioning_info_root_msdos(self):
expected_info = {
'partition_info': {
'label': 'msdos',
'partitions': {
'root':
{'number': 1,
'part_start': '1MiB',
'part_end': '5121MiB',
'flags': ['boot']}
}}}
i_info = ansible_deploy._parse_partitioning_info(self.node)
self.assertEqual(expected_info, i_info)
def test__parse_partitioning_info_all_gpt(self):
in_info = dict(INSTANCE_INFO)
in_info['swap_mb'] = 128
in_info['ephemeral_mb'] = 256
in_info['ephemeral_format'] = 'ext4'
in_info['preserve_ephemeral'] = True
in_info['configdrive'] = 'some-fake-user-data'
in_info['capabilities'] = {'disk_label': 'gpt'}
self.node.instance_info = in_info
self.node.save()
expected_info = {
'partition_info': {
'label': 'gpt',
'ephemeral_format': 'ext4',
'preserve_ephemeral': 'yes',
'partitions': {
'bios':
{'number': 1,
'name': 'bios',
'part_start': '1MiB',
'part_end': '2MiB',
'flags': ['bios_grub']},
'ephemeral':
{'number': 2,
'part_start': '2MiB',
'part_end': '258MiB',
'name': 'ephemeral'},
'swap':
{'number': 3,
'part_start': '258MiB',
'part_end': '386MiB',
'name': 'swap'},
'configdrive':
{'number': 4,
'part_start': '386MiB',
'part_end': '450MiB',
'name': 'configdrive'},
'root':
{'number': 5,
'part_start': '450MiB',
'part_end': '5570MiB',
'name': 'root'}
}}}
i_info = ansible_deploy._parse_partitioning_info(self.node)
self.assertEqual(expected_info, i_info)
@mock.patch.object(ansible_deploy.images, 'download_size', autospec=True)
def test__calculate_memory_req(self, image_mock):
self.config(group='ansible', extra_memory=1)
image_mock.return_value = 2000000 # < 2MiB
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(2, ansible_deploy._calculate_memory_req(task))
image_mock.assert_called_once_with(task.context, 'fake-image')
def test__get_configdrive_path(self):
self.config(tempdir='/path/to/tmpdir')
self.assertEqual('/path/to/tmpdir/spam.cndrive',
ansible_deploy._get_configdrive_path('spam'))
def test__prepare_extra_vars(self):
host_list = [('fake-uuid', '1.2.3.4', 'spam', 'ham'),
('other-uuid', '5.6.7.8', 'eggs', 'vikings')]
ansible_vars = {"foo": "bar"}
self.assertEqual(
{"nodes": [
{"name": "fake-uuid", "ip": '1.2.3.4',
"user": "spam", "extra": "ham"},
{"name": "other-uuid", "ip": '5.6.7.8',
"user": "eggs", "extra": "vikings"}],
"foo": "bar"},
ansible_deploy._prepare_extra_vars(host_list, ansible_vars))
def test__parse_root_device_hints(self):
hints = {"wwn": "fake wwn", "size": "12345", "rotational": True}
expected = {"wwn": "fake wwn", "size": 12345, "rotational": True}
props = self.node.properties
props['root_device'] = hints
self.node.properties = props
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
expected, ansible_deploy._parse_root_device_hints(task.node))
def test__parse_root_device_hints_fail_advanced(self):
hints = {"wwn": "s!= fake wwn",
"size": ">= 12345",
"name": "<or> spam <or> ham",
"rotational": True}
expected = {"wwn": "s!= fake%20wwn",
"name": "<or> spam <or> ham",
"size": ">= 12345"}
props = self.node.properties
props['root_device'] = hints
self.node.properties = props
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
exc = self.assertRaises(
exception.InvalidParameterValue,
ansible_deploy._parse_root_device_hints, task.node)
for key, value in expected.items():
self.assertIn(six.text_type(key), six.text_type(exc))
self.assertIn(six.text_type(value), six.text_type(exc))
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
def test__prepare_variables(self, mem_req_mock):
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"mem_req": 2000,
"disk_format": "qcow2",
"checksum": "md5:checksum"}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
def test__prepare_variables_root_device_hints(self, mem_req_mock):
props = self.node.properties
props['root_device'] = {"wwn": "fake-wwn"}
self.node.properties = props
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"mem_req": 2000,
"disk_format": "qcow2",
"checksum": "md5:checksum"},
"root_device_hints": {"wwn": "fake-wwn"}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
def test__prepare_variables_noglance(self, mem_req_mock):
self.config(image_store_insecure=True, group='ansible')
i_info = self.node.instance_info
i_info['image_checksum'] = 'sha256:checksum'
self.node.instance_info = i_info
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "no",
"source": "fake-image",
"mem_req": 2000,
"disk_format": "qcow2",
"checksum": "sha256:checksum"}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
def test__prepare_variables_configdrive_url(self, mem_req_mock):
i_info = self.node.instance_info
i_info['configdrive'] = 'http://configdrive_url'
self.node.instance_info = i_info
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"mem_req": 2000,
"disk_format": "qcow2",
"checksum": "md5:checksum"},
'configdrive': {'type': 'url',
'location': 'http://configdrive_url'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
def test__prepare_variables_configdrive_file(self, mem_req_mock):
i_info = self.node.instance_info
i_info['configdrive'] = 'fake-content'
self.node.instance_info = i_info
self.node.save()
self.config(tempdir='/path/to/tmpfiles')
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"mem_req": 2000,
"disk_format": "qcow2",
"checksum": "md5:checksum"},
'configdrive': {'type': 'file',
'location': '/path/to/tmpfiles/%s.cndrive'
% self.node.uuid}}
with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
create=True) as open_mock:
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
open_mock.assert_has_calls((
mock.call('/path/to/tmpfiles/%s.cndrive' % self.node.uuid,
'w'),
mock.call().__enter__(),
mock.call().write('fake-content'),
mock.call().__exit__(None, None, None)))
def test__validate_clean_steps(self):
steps = [{"interface": "deploy",
"name": "foo",
"args": {"spam": {"required": True, "value": "ham"}}},
{"name": "bar",
"interface": "deploy"}]
self.assertIsNone(ansible_deploy._validate_clean_steps(
steps, self.node.uuid))
def test__validate_clean_steps_missing(self):
steps = [{"name": "foo",
"interface": "deploy",
"args": {"spam": {"value": "ham"},
"ham": {"required": True}}},
{"name": "bar"},
{"interface": "deploy"}]
exc = self.assertRaises(exception.NodeCleaningFailure,
ansible_deploy._validate_clean_steps,
steps, self.node.uuid)
self.assertIn("name foo, field ham.value", six.text_type(exc))
self.assertIn("name bar, field interface", six.text_type(exc))
self.assertIn("name undefined, field name", six.text_type(exc))
def test__validate_clean_steps_names_not_unique(self):
steps = [{"name": "foo",
"interface": "deploy"},
{"name": "foo",
"interface": "deploy"}]
exc = self.assertRaises(exception.NodeCleaningFailure,
ansible_deploy._validate_clean_steps,
steps, self.node.uuid)
self.assertIn("unique names", six.text_type(exc))
@mock.patch.object(ansible_deploy.yaml, 'safe_load', autospec=True)
def test__get_clean_steps(self, load_mock):
steps = [{"interface": "deploy",
"name": "foo",
"args": {"spam": {"required": True, "value": "ham"}}},
{"name": "bar",
"interface": "deploy",
"priority": 100}]
load_mock.return_value = steps
expected = [{"interface": "deploy",
"step": "foo",
"priority": 10,
"abortable": False,
"argsinfo": {"spam": {"required": True}},
"args": {"spam": "ham"}},
{"interface": "deploy",
"step": "bar",
"priority": 100,
"abortable": False,
"argsinfo": {},
"args": {}}]
d_info = self.node.driver_info
d_info['ansible_clean_steps_config'] = 'custom_clean'
self.node.driver_info = d_info
self.node.save()
self.config(group='ansible', playbooks_path='/path/to/playbooks')
with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
create=True) as open_mock:
self.assertEqual(
expected,
ansible_deploy._get_clean_steps(
self.node, interface="deploy",
override_priorities={"foo": 10}))
open_mock.assert_has_calls((
mock.call('/path/to/playbooks/custom_clean'),))
load_mock.assert_called_once_with(
open_mock().__enter__.return_value)
class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
def setUp(self):
super(TestAnsibleDeploy, self).setUp()
self.driver = ansible_deploy.AnsibleDeploy()
def test_get_properties(self):
self.assertEqual(
set(list(ansible_deploy.COMMON_PROPERTIES) +
['deploy_forces_oob_reboot']),
set(self.driver.get_properties()))
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate(self, pxe_boot_validate_mock, check_params_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
check_params_mock.assert_called_once_with(
{'instance_info.image_source': INSTANCE_INFO['image_source']},
mock.ANY)
@mock.patch.object(deploy_utils, 'get_boot_option',
return_value='netboot', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_not_iwdi_netboot(self, pxe_boot_validate_mock,
get_boot_mock):
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
get_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_deploy(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYWAIT)
power_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_tear_down(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.'
'build_instance_info_for_deploy',
return_value={'test': 'test'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
def test_prepare(self, pxe_prepare_ramdisk_mock,
build_instance_info_mock, build_options_mock,
power_action_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
with mock.patch.object(task.driver.network,
'add_provisioning_network',
autospec=True) as net_mock:
self.driver.prepare(task)
net_mock.assert_called_once_with(task)
power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
task, {'op1': 'test1'})
self.node.refresh()
self.assertEqual('test', self.node.instance_info['test'])
@mock.patch.object(ansible_deploy, '_get_configdrive_path',
return_value='/path/test', autospec=True)
@mock.patch.object(irlib_utils, 'unlink_without_raise', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_clean_up(self, pxe_clean_up_mock, unlink_mock,
get_cfdrive_path_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
pxe_clean_up_mock.assert_called_once_with(task)
get_cfdrive_path_mock.assert_called_once_with(self.node['uuid'])
unlink_mock.assert_called_once_with('/path/test')
@mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
def test_get_clean_steps(self, get_clean_steps_mock):
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'},
{'priority': 99, 'interface': 'deploy',
'step': 'erase_devices_metadata'},
]
get_clean_steps_mock.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
get_clean_steps_mock.assert_called_once_with(
task.node, interface='deploy',
override_priorities={
'erase_devices': None,
'erase_devices_metadata': None})
self.assertEqual(mock_steps, steps)
@mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
def test_get_clean_steps_priority(self, mock_get_clean_steps):
self.config(erase_devices_priority=9, group='deploy')
self.config(erase_devices_metadata_priority=98, group='deploy')
mock_steps = [{'priority': 9, 'interface': 'deploy',
'step': 'erase_devices'},
{'priority': 98, 'interface': 'deploy',
'step': 'erase_devices_metadata'},
]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task.node, interface='deploy',
override_priorities={'erase_devices': 9,
'erase_devices_metadata': 98})
self.assertEqual(mock_steps, steps)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
def test_execute_clean_step(self, parse_driver_info_mock,
prepare_extra_mock, run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'args': {'tags': ['clean']}}
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'], '127.0.0.1', 'test_u', {})]}
prepare_extra_mock.return_value = ironic_nodes
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://127.0.0.1'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.execute_clean_step(task, step)
parse_driver_info_mock.assert_called_once_with(
task.node, action='clean')
prepare_extra_mock.assert_called_once_with(
ironic_nodes['ironic_nodes'])
run_playbook_mock.assert_called_once_with(
'test_pl', ironic_nodes, 'test_k', tags=['clean'])
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, 'LOG', autospec=True)
def test_execute_clean_step_no_success_log(
self, log_mock, run_mock, utils_mock, parse_driver_info_mock):
run_mock.side_effect = exception.InstanceDeployFailure('Boom')
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'args': {'tags': ['clean']}}
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://127.0.0.1'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.execute_clean_step(task, step)
log_mock.error.assert_called_once_with(
mock.ANY, {'node': task.node['uuid'],
'step': 'erase_devices'})
utils_mock.assert_called_once_with(task, 'Boom')
self.assertFalse(log_mock.info.called)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
def test_prepare_cleaning(
            self, prepare_ramdisk_mock, build_options_mock, power_action_mock,
set_node_cleaning_steps, run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'tags': ['clean']}
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['clean_steps'] = [step]
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.add_cleaning_network = mock.Mock()
state = self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
task.driver.network.add_cleaning_network.assert_called_once_with(
task)
            build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
self.assertFalse(run_playbook_mock.called)
self.assertEqual(states.CLEANWAIT, state)
@mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True)
def test_prepare_cleaning_callback_no_steps(self,
set_node_cleaning_steps):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.add_cleaning_network = mock.Mock()
self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
self.assertFalse(task.driver.network.add_cleaning_network.called)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_tear_down_cleaning(self, clean_ramdisk_mock, power_action_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.remove_cleaning_network = mock.Mock()
self.driver.tear_down_cleaning(task)
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
clean_ramdisk_mock.assert_called_once_with(task)
(task.driver.network.remove_cleaning_network
.assert_called_once_with(task))
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_parse_partitioning_info',
autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
def test__ansible_deploy(self, prepare_vars_mock, parse_part_info_mock,
parse_dr_info_mock, prepare_extra_mock,
run_playbook_mock):
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'], '127.0.0.1', 'test_u')]}
prepare_extra_mock.return_value = ironic_nodes
_vars = {
'url': 'image_url',
'checksum': 'aa'}
prepare_vars_mock.return_value = _vars
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.extra = {'ham': 'spam'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver._ansible_deploy(task, '127.0.0.1')
prepare_vars_mock.assert_called_once_with(task)
parse_part_info_mock.assert_called_once_with(task.node)
parse_dr_info_mock.assert_called_once_with(task.node)
prepare_extra_mock.assert_called_once_with(
[(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
variables=_vars)
run_playbook_mock.assert_called_once_with(
'test_pl', ironic_nodes, 'test_k')
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_parse_partitioning_info',
autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
def test__ansible_deploy_iwdi(self, prepare_vars_mock,
parse_part_info_mock, parse_dr_info_mock,
prepare_extra_mock, run_playbook_mock):
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'], '127.0.0.1', 'test_u')]}
prepare_extra_mock.return_value = ironic_nodes
_vars = {
'url': 'image_url',
'checksum': 'aa'}
prepare_vars_mock.return_value = _vars
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = True
self.node.driver_internal_info = driver_internal_info
self.node.extra = {'ham': 'spam'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver._ansible_deploy(task, '127.0.0.1')
prepare_vars_mock.assert_called_once_with(task)
self.assertFalse(parse_part_info_mock.called)
parse_dr_info_mock.assert_called_once_with(task.node)
prepare_extra_mock.assert_called_once_with(
[(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
variables=_vars)
run_playbook_mock.assert_called_once_with('test_pl', ironic_nodes,
'test_k')
@mock.patch.object(fake.FakePower, 'get_power_state',
return_value=states.POWER_OFF)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_reboot_and_finish_deploy_force_reboot(self, power_action_mock,
get_pow_state_mock):
d_info = self.node.driver_info
d_info['deploy_forces_oob_reboot'] = True
self.node.driver_info = d_info
self.node.save()
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(task.driver, 'network') as net_mock:
self.driver.reboot_and_finish_deploy(task)
net_mock.remove_provisioning_network.assert_called_once_with(
task)
net_mock.configure_tenant_networks.assert_called_once_with(
task)
expected_power_calls = [((task, states.POWER_OFF),),
((task, states.POWER_ON),)]
self.assertEqual(expected_power_calls,
power_action_mock.call_args_list)
get_pow_state_mock.assert_not_called()
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_reboot_and_finish_deploy_soft_poweroff_retry(self,
power_action_mock,
ansible_mock):
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
self.config(group='ansible',
post_deploy_get_power_state_retries=1)
self.node.provision_state = states.DEPLOYING
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://127.0.0.1'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(task.driver, 'network') as net_mock:
with mock.patch.object(task.driver.power,
'get_power_state',
return_value=states.POWER_ON) as p_mock:
self.driver.reboot_and_finish_deploy(task)
p_mock.assert_called_with(task)
self.assertEqual(2, len(p_mock.mock_calls))
net_mock.remove_provisioning_network.assert_called_once_with(
task)
net_mock.configure_tenant_networks.assert_called_once_with(
task)
power_action_mock.assert_has_calls(
[mock.call(task, states.POWER_OFF),
mock.call(task, states.POWER_ON)])
expected_power_calls = [((task, states.POWER_OFF),),
((task, states.POWER_ON),)]
self.assertEqual(expected_power_calls,
power_action_mock.call_args_list)
ansible_mock.assert_called_once_with('shutdown.yaml',
mock.ANY, mock.ANY)
@mock.patch.object(ansible_deploy, '_get_node_ip', autospec=True,
return_value='1.2.3.4')
def test_continue_deploy(self, getip_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.multiple(self.driver, autospec=True,
_ansible_deploy=mock.DEFAULT,
reboot_to_instance=mock.DEFAULT):
self.driver.continue_deploy(task)
getip_mock.assert_called_once_with(task)
self.driver._ansible_deploy.assert_called_once_with(
task, '1.2.3.4')
self.driver.reboot_to_instance.assert_called_once_with(task)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertEqual(states.DEPLOYING, task.node.provision_state)
@mock.patch.object(utils, 'node_set_boot_device', autospec=True)
def test_reboot_to_instance(self, bootdev_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(self.driver, 'reboot_and_finish_deploy',
autospec=True):
task.driver.boot = mock.Mock()
self.driver.reboot_to_instance(task)
bootdev_mock.assert_called_once_with(task, 'disk',
persistent=True)
self.driver.reboot_and_finish_deploy.assert_called_once_with(
task)
task.driver.boot.clean_up_ramdisk.assert_called_once_with(
task)


@ -0,0 +1,11 @@
---
features:
- |
    Adds a new ``ansible`` deploy interface. It mostly targets undercloud
    use cases by allowing greater customization of the provisioning process.
    This new deploy interface is usable only with hardware types.
    It is set as supported for the ``generic`` hardware type and all
    its subclasses, but must be explicitly enabled via the
    ``[DEFAULT]enabled_deploy_interfaces`` configuration option
    before nodes can be set to use it.
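
    For example, a minimal ``ironic.conf`` snippet enabling it alongside
    the default interfaces (the exact list depends on the deployment)::

        [DEFAULT]
        enabled_deploy_interfaces = direct,iscsi,ansible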


@ -99,6 +99,7 @@ ironic.hardware.interfaces.console =
no-console = ironic.drivers.modules.noop:NoConsole
ironic.hardware.interfaces.deploy =
ansible = ironic.drivers.modules.ansible.deploy:AnsibleDeploy
direct = ironic.drivers.modules.agent:AgentDeploy
fake = ironic.drivers.modules.fake:FakeDeploy
iscsi = ironic.drivers.modules.iscsi_deploy:ISCSIDeploy
@ -187,6 +188,7 @@ autodoc_exclude_modules =
ironic.db.sqlalchemy.alembic.env
ironic.db.sqlalchemy.alembic.versions.*
ironic_tempest_plugin.*
ironic.drivers.modules.ansible.playbooks*
api_doc_dir = contributor/api
[build_sphinx]