Migrating Undercloud playbooks to their own Ansible Role

The validations should be in the form of Ansible roles, in order to be
easily accessed from the CLI as well as from Mistral (as is currently
the case). This will also provide proper documentation and a canvas, and
makes it possible to validate a role before running it by ensuring it
has metadata, output and so on.

Note that all the custom modules/lookup plugins have been copied into
their own top-level directory.

Change-Id: If08678a18165ad12b8fa85f201541fe60c928e1b
Implements: blueprint validation-framework
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
This commit is contained in:
Gael Chamoulaud 2019-02-14 13:31:52 +01:00
parent e6490b3e36
commit d289816733
90 changed files with 4715 additions and 0 deletions

View File

@ -0,0 +1,31 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """Callback plugin that aborts the run when no hosts were processed.

    Exits the Ansible process with return code 10 when the playbook
    statistics show that not a single host ran any task.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_NAME = 'fail_if_no_hosts'

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display)

    def v2_playbook_on_stats(self, stats):
        # An empty `processed` dict means no host matched the play.
        if not stats.processed:
            sys.exit(10)

View File

@ -0,0 +1,198 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import pprint
from ansible.plugins.callback import CallbackBase
# Printed when a task fails on a host; filled with task name, host, message.
FAILURE_TEMPLATE = """\
Task '{}' failed:
Host: {}
Message: {}
"""

# Printed when a task succeeded but emitted warnings; the individual
# warnings are printed after this header.
WARNING_TEMPLATE = """\
Task '{}' succeeded, but had some warnings:
Host: {}
Warnings:
"""

# Printed for the output of the `debug` module (variable or message).
DEBUG_TEMPLATE = """\
Task: Debug
Host: {}
{}
"""
def indent(text):
    '''Return *text* with every line prefixed by four spaces.'''
    prefix = '    '
    return ''.join('{}{}\n'.format(prefix, line)
                   for line in text.splitlines())
def print_failure_message(host_name, task_name, results, abridged_result):
    '''Print a human-readable error info from an Ansible result dictionary.

    :param host_name: name of the host the task ran on
    :param task_name: name of the failed task
    :param results: the raw Ansible result dictionary
    :param abridged_result: shortened result, shown only when no error
        message could be extracted from *results*
    '''
    def is_script(results):
        # A `script` task carries its command line in `_raw_params` and
        # has a return code but (unlike `command`) no top-level `cmd` key.
        return ('rc' in results and
                'invocation' in results and
                results['invocation'].get('module_name') == 'script' and
                '_raw_params' in results['invocation'].get('module_args', {}))

    display_full_results = False
    if 'rc' in results and 'cmd' in results:
        command = results['cmd']
        # The command can be either a list or a string. Concat if it's a list
        # (use isinstance rather than an exact type comparison):
        if isinstance(command, list):
            command = " ".join(results['cmd'])
        message = "Command `{}` exited with code: {}".format(
            command, results['rc'])
        # There may be an optional message attached to the command. Display it:
        if 'msg' in results:
            message = message + ": " + results['msg']
    elif is_script(results):
        script_name = results['invocation']['module_args']['_raw_params']
        message = "Script `{}` exited with code: {}".format(
            script_name, results['rc'])
    elif 'msg' in results:
        message = results['msg']
    else:
        message = "Unknown error"
        display_full_results = True

    print(FAILURE_TEMPLATE.format(task_name, host_name, message))

    stdout = results.get('module_stdout', results.get('stdout', ''))
    if stdout:
        print('stdout:')
        print(indent(stdout))
    stderr = results.get('module_stderr', results.get('stderr', ''))
    if stderr:
        print('stderr:')
        print(indent(stderr))
    if display_full_results:
        print("Could not get an error message. Here is the Ansible output:")
        pprint.pprint(abridged_result, indent=4)
    warnings = results.get('warnings', [])
    if warnings:
        print("Warnings:")
        for warning in warnings:
            print("*", warning)
        print("")
# TODO(shadower): test with async settings
class CallbackModule(CallbackBase):
    """Stdout callback printing a human-friendly validation summary.

    Successful tasks stay silent unless they carry warnings or come from
    the `debug` module; failures go through `print_failure_message`, and a
    per-host pass/fail summary is printed at the end of the run.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'validation_output'

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display)

    def v2_playbook_on_play_start(self, play):
        pass  # No need to notify that a play started

    def v2_playbook_on_task_start(self, task, is_conditional):
        pass  # No need to notify that a task started

    def v2_runner_on_ok(self, result, **kwargs):
        # Called once per host for every task that finished successfully.
        host_name = result._host
        task_name = result._task.get_name()

        results = result._result  # A dict of the module name etc.
        self._dump_results(results)
        warnings = results.get('warnings', [])
        # Print only tasks that produced some warnings:
        if warnings:
            print(WARNING_TEMPLATE.format(task_name, host_name))
            for warning in warnings:
                print("*", warning)

        # Print the result of debug module
        if (('invocation' in results) and
                ('module_name' in results['invocation'])):
            if ((results['invocation']['module_name'] == 'debug') and
                    ('module_args' in results['invocation'])):
                output = ""
                # Variable and its value
                if 'var' in results['invocation']['module_args']:
                    variable = results['invocation']['module_args']['var']
                    value = results[variable]
                    output = "{}: {}".format(variable, str(value))
                # Debug message
                elif 'msg' in results['invocation']['module_args']:
                    output = "Message: {}".format(
                        results['invocation']['module_args']['msg'])
                print(DEBUG_TEMPLATE.format(host_name, output))

    def v2_runner_on_failed(self, result, **kwargs):
        # Called once per host for every failed task.
        host_name = result._host
        task_name = result._task.get_name()

        result_dict = result._result  # A dict of the module name etc.
        abridged_result = self._dump_results(result_dict)
        if 'results' in result_dict:
            # The task is a list of items under `results`
            for item in result_dict['results']:
                if item.get('failed', False):
                    print_failure_message(host_name, task_name, item, item)
        else:
            # The task is a "normal" module invocation
            print_failure_message(host_name, task_name, result_dict,
                                  abridged_result)

    def v2_runner_on_skipped(self, result, **kwargs):
        pass  # No need to print skipped tasks

    def v2_runner_on_unreachable(self, result, **kwargs):
        # Treat an unreachable host like a failure with a fixed message.
        host_name = result._host
        task_name = result._task.get_name()

        results = {'msg': 'The host is unreachable.'}
        print_failure_message(host_name, task_name, results, results)

    def v2_playbook_on_stats(self, stats):
        # Print a final per-host summary of the whole run.
        def failed(host):
            return (stats.summarize(host).get('failures', 0) > 0 or
                    stats.summarize(host).get('unreachable', 0) > 0)

        hosts = sorted(stats.processed.keys())
        failed_hosts = [host for host in hosts if failed(host)]

        if hosts:
            if failed_hosts:
                if len(failed_hosts) == len(hosts):
                    print("Failure! The validation failed for all hosts:")
                    for failed_host in failed_hosts:
                        print("*", failed_host)
                else:
                    print("Failure! The validation failed for hosts:")
                    for failed_host in failed_hosts:
                        print("*", failed_host)
                    print("and passed for hosts:")
                    for host in [h for h in hosts if h not in failed_hosts]:
                        print("*", host)
            else:
                print("Success! The validation passed for all hosts:")
                for host in hosts:
                    print("*", host)
        else:
            print("Warning! The validation did not run on any host.")

View File

@ -0,0 +1,96 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: advanced_format
short_description: Check for advanced disk format
description:
- Check whether a drive uses advanced format
options:
drive:
required: true
description:
- drive name
type: str
author: "Martin Andre (@mandre)"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Detect whether the drive uses Advanced Format
advanced_format: drive=vda
'''
def read_int(module, file_path):
    '''Return the integer contents of the file at *file_path*.

    Calls module.fail_json (which normally does not return) when the
    file cannot be opened or does not hold an integer.
    '''
    try:
        with open(file_path) as handle:
            contents = handle.read()
    except IOError:
        module.fail_json(msg="Cannot open '%s'" % file_path)
    else:
        try:
            return int(contents)
        except ValueError:
            module.fail_json(msg="The '%s' file doesn't contain an integer value" %
                             file_path)
def main():
    """Entry point: report whether a drive uses Advanced Format.

    Compares the physical and logical block sizes exposed by sysfs and
    emits a warning when they differ.
    """
    module = AnsibleModule(argument_spec=dict(
        drive=dict(required=True, type='str')
    ))

    drive = module.params.get('drive')
    queue_path = path.join('/sys/class/block', drive, 'queue')

    physical = read_int(module, path.join(queue_path, 'physical_block_size'))
    logical = read_int(module, path.join(queue_path, 'logical_block_size'))

    if physical == logical:
        module.exit_json(
            changed=False,
            msg="The disk %s probably doesn't use Advance Format." % drive,
        )
    else:
        module.exit_json(
            # NOTE(shadower): we're marking this as `changed`, to make it
            # visually stand out when running via Ansible directly instead of
            # using the API.
            #
            # The API & UI is planned to look for the `warnings` field and
            # display it differently.
            changed=True,
            warnings=["Physical and logical block sizes of drive %s differ "
                      "(%s vs. %s). This can mean the disk uses Advance "
                      "Format." %
                      (drive, physical, logical)],
        )


if __name__ == '__main__':
    main()

181
library/check_flavors.py Normal file
View File

@ -0,0 +1,181 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule # noqa
import re
DOCUMENTATION = '''
---
module: check_flavors
short_description: Check that assigned flavors exist and are configured
description:
- Validate that the flavors assigned to roles exist and have the correct
settings. Right now, that means that boot_option is unset or set to 'local'
, or if set to 'netboot', issue a warning.
options:
roles_info:
required: true
description:
- A list of role info
type: list
flavors:
required: true
description:
- A dictionary of flavors from Nova
type: dict
author: "Brad P. Crochet"
'''
EXAMPLES = '''
- hosts: undercloud
tasks:
- name: Check the flavors
check_flavors:
roles_info: "{{ lookup('roles_info', wantlist=True) }}"
flavors: "{{ lookup('nova_flavors', wantlist=True) }}"
'''
def validate_roles_and_flavors(roles_info, flavors):
    """Check that the flavors assigned to roles exist and are configured.

    :param roles_info: list of role dicts with 'name', 'flavor' and
        'count' keys
    :param flavors: dictionary mapping flavor names to flavor data from Nova
    :returns: tuple of (result, warnings, errors) where result maps flavor
        names to (flavor, scale) tuples, and warnings/errors are lists of
        human-readable messages
    """
    result = {}
    errors = []
    warnings = []
    # NOTE(review): these flags are shared across all roles in the loop
    # below, so one correctly-configured flavor can mask problems in
    # another -- confirm upstream intent before changing.
    custom_resource_class = None
    custom_resource_class_val = None
    message = "Flavor '{1}' provided for the role '{0}', does not exist"
    missing_message = "Role '{0}' is in use, but has no flavor assigned"
    warning_message = (
        'Flavor {0} "capabilities:boot_option" is set to '
        '"netboot". Nodes will PXE boot from the ironic '
        'conductor instead of using a local bootloader. '
        'Make sure that enough nodes are marked with the '
        '"boot_option" capability set to "netboot".')
    resource_class_missing = (
        'Flavor {0} does not have a custom resource class '
        'associated with it')
    resource_class_name_incorrect = (
        'Flavor {0} has an incorrectly named custom '
        'resource class associated with it')
    resource_class_value_incorrect = (
        'Flavor {0} has a resource class that is not '
        'offering exactly 1 resource')
    disable_standard_scheduling = (
        'Flavor {0} has to have scheduling based on '
        'standard properties disabled by setting '
        'resources:VCPU=0 resources:MEMORY_MB=0 '
        'resources:DISK_GB=0 in the flavor property')

    for role in roles_info:
        target = role.get('name')
        flavor_name = role.get('flavor')
        scale = role.get('count', 0)

        # Skip roles without a flavor or not deployed at all; a deployed
        # role without a flavor is an error.
        if flavor_name is None or not scale:
            if scale:
                errors.append(missing_message.format(target))
            continue

        old_flavor_name, old_scale = result.get(flavor_name, (None, None))
        if old_flavor_name:
            # Flavor already seen for another role: just update the scale.
            result[flavor_name] = (old_flavor_name, scale)
        else:
            flavor = flavors.get(flavor_name)

            if flavor:
                keys = flavor.get('keys', None)
                if keys:
                    if keys.get('capabilities:boot_option', '') \
                            == 'netboot':
                        warnings.append(
                            warning_message.format(flavor_name))
                    # check if the baremetal flavor has custom resource class
                    # required for scheduling since queens
                    resource_specs = {key.split(
                        "resources:", 1)[-1]: val
                        for key, val in keys.items()
                        if key.startswith("resources:")}

                    if not resource_specs:
                        errors.append(resource_class_missing.format(
                            flavor_name))
                    else:
                        for key, val in resource_specs.items():
                            if key.startswith("CUSTOM_"):
                                custom_resource_class = True
                                match = re.match('CUSTOM_[A-Z_]+', key)
                                if match is None:
                                    # BUG FIX: list.append takes a single
                                    # argument -- the original passed the
                                    # template and the flavor name as two
                                    # arguments, raising TypeError. Format
                                    # the message instead.
                                    errors.append(
                                        resource_class_name_incorrect.format(
                                            flavor_name))
                                else:
                                    if val == 1:
                                        custom_resource_class_val = True
                        if not custom_resource_class:
                            errors.append(resource_class_missing.format(
                                flavor_name))
                        if not custom_resource_class_val:
                            errors.append(resource_class_value_incorrect.
                                          format(flavor_name))
                        disk = resource_specs.get("DISK_GB", None)
                        memory = resource_specs.get("MEMORY_MB", None)
                        vcpu = resource_specs.get("VCPU", None)
                        if any(resource != 0 for resource in [disk, memory,
                                                              vcpu]):
                            errors.append(disable_standard_scheduling.
                                          format(flavor_name))

                result[flavor_name] = (flavor, scale)
            else:
                errors.append(message.format(target, flavor_name))

    return result, warnings, errors
def main():
    """Entry point: validate role flavors and report the outcome."""
    argument_spec = dict(
        roles_info=dict(required=True, type='list'),
        flavors=dict(required=True, type='dict')
    )
    module = AnsibleModule(argument_spec=argument_spec)

    flavor_result, warnings, errors = validate_roles_and_flavors(
        module.params.get('roles_info'),
        module.params.get('flavors'))

    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(
            msg="All flavors configured on roles",
            flavors=flavor_result)


if __name__ == '__main__':
    main()

151
library/check_package_update.py Executable file
View File

@ -0,0 +1,151 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Check for available updates for a given package."""
import collections
import subprocess
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: check_package_update
short_description: Check for available updates for a given package
options:
package:
required: true
description:
- The name of the package you want to check
type: str
pkg_mgr:
required: true
description:
- Supported Package Manager, DNF or YUM
type: str
author: "Florian Fuchs"
'''
# Usage example; the Jinja expression must be quoted to stay valid YAML.
EXAMPLES = '''
- hosts: webservers
  tasks:
    - name: Get available updates for packages
      check_package_update:
        package: python-tripleoclient
        pkg_mgr: "{{ ansible_pkg_mgr }}"
'''
# Package managers this module knows how to query.
SUPPORTED_PKG_MGRS = (
    'yum',
    'dnf',
)
# Lightweight record for one installed or available package.
PackageDetails = collections.namedtuple('PackageDetails',
                                        ['name', 'arch', 'version'])


def get_package_details(line):
    """Parse one `yum/dnf list` output line into a PackageDetails tuple.

    The line has the shape `name.arch  [epoch:]version-release  repo`.
    """
    fields = line.rstrip().split()
    name, arch = fields[0].split('.')
    # Keep only the version: drop the release suffix and the epoch prefix.
    version = fields[1].split('-')[0].split(':')[-1]
    return PackageDetails(name, arch, version)
def _command(command):
# Return the result of a subprocess call
# as [stdout, stderr]
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
return process.communicate()
def _get_installed_version_from_output(output, package):
    """Return PackageDetails for the first output line mentioning *package*.

    Returns None when no line matches.
    """
    matching = (line for line in output.split('\n') if package in line)
    first = next(matching, None)
    if first is not None:
        return get_package_details(first)
def _get_latest_available_versions(output, installed):
    """Return the latest available (minor, major) version strings.

    "Minor" means the same major version as *installed*, "major" means a
    higher one; either may be None when nothing newer is listed.

    NOTE(review): the "major version" comparison uses only the first
    character of the version string, so versions >= 10 would compare
    incorrectly -- confirm upstream intent before changing.
    """
    latest_minor = None
    latest_major = None
    # Only consider lines for the same package name and architecture.
    wanted = '{i.name}.{i.arch}'.format(i=installed)
    packages = [get_package_details(line) for line in output.split('\n')
                if wanted in line]

    same_major = sorted(p for p in packages
                        if p.version[0] == installed.version[0])
    if same_major:
        latest_minor = same_major[-1].version

    higher_major = sorted(p for p in packages
                          if p.version[0] > installed.version[0])
    if higher_major:
        latest_major = higher_major[-1].version

    # If the output doesn't contain packages with the same major version
    # let's assume the currently installed version as latest minor one.
    if latest_minor is None:
        latest_minor = installed.version
    return latest_minor, latest_major
def check_update(module, package, pkg_mgr):
    """Look up installed and latest available versions of *package*.

    Fails the module when the package manager is unsupported or when the
    installed package cannot be queried; otherwise exits with the current
    version plus the latest minor and major versions available.
    """
    if pkg_mgr not in SUPPORTED_PKG_MGRS:
        module.fail_json(
            msg='Package manager "{}" is not supported.'.format(pkg_mgr))
        return

    installed_stdout, installed_stderr = _command(
        [pkg_mgr, 'list', 'installed', package])

    # Fail the module if for some reason we can't lookup the current package.
    if installed_stderr != '':
        module.fail_json(msg=installed_stderr)
        return

    installed = _get_installed_version_from_output(installed_stdout, package)

    available_stdout, _unused_stderr = _command(
        [pkg_mgr, 'list', 'available', installed.name])

    latest_minor_version, latest_major_version = \
        _get_latest_available_versions(available_stdout, installed)

    module.exit_json(changed=False,
                     name=installed.name,
                     current_version=installed.version,
                     latest_minor_version=latest_minor_version,
                     latest_major_version=latest_major_version)
def main():
    """Entry point: report available updates for the requested package."""
    argument_spec = dict(
        package=dict(required=True, type='str'),
        pkg_mgr=dict(required=True, type='str')
    )
    module = AnsibleModule(argument_spec=argument_spec)
    params = module.params
    check_update(module, params.get('package'), params.get('pkg_mgr'))


if __name__ == '__main__':
    main()

249
library/docker_facts.py Normal file
View File

@ -0,0 +1,249 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
six.add_metaclass(type)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
# Module documentation; fixes the "mulitple" typo in the notes section.
DOCUMENTATION = """
---
module: docker_facts
version_added: '2.6'
short_description: Gather list of volumes, images, containers
notes:
  - When specifying multiple filters, only assets matching B(all) filters
    will be returned.
description:
  - Gather a list of volumes, images, and containers on a running system
  - Return both filtered and unfiltered lists of volumes, images,
    and containers.
options:
  image_filter:
    description:
      - List of k=v pairs to use as a filter for images.
    type: list
    required: false
  volume_filter:
    description:
      - List of k=v pairs to use as a filter for volumes.
    type: list
    required: false
  container_filter:
    description:
      - List of k=v pairs to use as a filter for containers.
    type: list
    required: false
"""
EXAMPLES = """
- name: Gather Docker facts
docker_facts:
- name: Gather filtered Docker facts
docker_facts:
image_filter:
- dangling=true
volume_filter:
- dangling=true
container_filter:
- status=exited
- status=dead
- name: Remove containers that matched filters
docker_container:
name: "{{ item }}"
state: absent
loop: "{{ docker.containers_filtered | map(attribute='id') | list }}"
"""
RETURN = """
docker:
description: >
Lists of container, volume, and image UUIDs,
both filtered and unfiltered.
returned: always
type: complex
contains:
containers:
description: List of dictionaries of container name, state, and ID
returned: always
type: complex
containers_filtered:
description: >
List of dictionaries of container name, state, and ID
that matched the filter(s)
returned: always
type: complex
images:
description: List of image UUIDs
returned: always
type: list
images_filtered:
description: List of UUIDs that matched the filter(s)
returned: always
type: list
volumes:
description: List of volume UUIDs
returned: always
type: list
volumes_filtered:
description: List of UUIDs that matched the filter(s)
returned: always
type: list
"""
import itertools
from ansible.module_utils.basic import AnsibleModule
# (fact name, docker sub-command, options) triples driving fact gathering.
# Containers use a custom --format so name, ID and status can be split
# on '##' afterwards.
DOCKER_SUBCOMMAND_LOOKUP = [
    ('images', 'images', '-q'),
    ('volumes', 'volume ls', '-q'),
    ('containers', 'ps -a', '--format {{.Names}}##{{.ID}}##{{.Status}}')
]
def run_docker_command(
        module,
        docker_bin,
        sub_command=None,
        opts='-q',
        filters=None):
    """Run a docker CLI command and return (rc, stdout lines, stderr).

    :param module: AnsibleModule instance (provides run_command/fail_json)
    :param docker_bin: path to the docker binary, as a list or string
    :param sub_command: docker sub-command, as a list or string
    :param opts: extra options, as a list or string
    :param filters: list (or string) of `k=v` filter expressions
    :returns: tuple of (return code, list of stdout lines, stderr text)
    """
    # BUG FIX: mutable default arguments ([]) are shared across calls in
    # Python; use None sentinels instead. Also dropped the original
    # leading loop, which only rebound a local name and had no effect.
    if sub_command is None:
        sub_command = []
    if filters is None:
        filters = []

    # Accept both strings and lists for every command fragment.
    if not isinstance(docker_bin, list):
        docker_bin = docker_bin.split()
    if not isinstance(sub_command, list):
        sub_command = sub_command.split()
    if not isinstance(opts, list):
        opts = opts.split()
    if not isinstance(filters, list):
        filters = filters.split()

    filters = ['-f ' + i for i in filters]
    command = list(itertools.chain(docker_bin, sub_command, opts, filters))
    rc, out, err = module.run_command(command)
    if rc != 0:
        # BUG FIX: the original message embedded the source indentation
        # via a backslash line continuation inside the string literal.
        module.fail_json(
            msg='Error running command {}.\n\nOriginal error:\n\n{}'.format(
                command, err))

    if out == '':
        out = []
    else:
        out = out.strip().split('\n')

    return rc, out, err
def main():
    """Gather docker image/volume/container facts, filtered and unfiltered.

    Exposes an `ansible_facts.docker` dict with one plain and one
    `_filtered` list per asset type from DOCKER_SUBCOMMAND_LOOKUP.
    """
    module = AnsibleModule(
        argument_spec=dict(
            image_filter=dict(type='list', default=[]),
            volume_filter=dict(type='list', default=[]),
            container_filter=dict(type='list', default=[]),
        ),

        supports_check_mode=True
    )

    docker_bin = [module.get_bin_path('docker')]

    # Pre-seed every fact with an empty list so the keys exist even when
    # docker is not installed.
    docker_facts = {}
    for item in DOCKER_SUBCOMMAND_LOOKUP:
        docker_facts[item[0]] = []
        docker_facts[item[0] + '_filtered'] = []

    if docker_bin[0]:
        # NOTE(review): this statement looks redundant with the seeding
        # loop above (it resets only the last item's key) -- confirm
        # before removing.
        docker_facts[item[0]] = []

        # Run each Docker command
        for item in DOCKER_SUBCOMMAND_LOOKUP:
            rc, out, err = run_docker_command(
                module,
                docker_bin,
                sub_command=item[1],
                opts=item[2])
            # For everything but containers, return just the UIDs
            if item[0] != 'containers':
                docker_facts[item[0]] = out
            elif item[0] == 'containers':
                # For containers, use a custom format to get name, id,
                # and status
                for line in out:
                    container_name, container_id, container_status = \
                        line.split('##')
                    container_status = container_status.split()[0]
                    docker_facts[item[0]].append({
                        'name': container_name,
                        'id': container_id,
                        'status': container_status
                    })
            # Get filtered facts
            rc, out, err = run_docker_command(
                module,
                docker_bin,
                sub_command=item[1],
                opts=item[2],
                filters=module.params[item[0].rstrip('s') + '_filter']
            )
            if item[0] != 'containers':
                docker_facts[item[0] + '_filtered'] = out
            elif item[0] == 'containers':
                for line in out:
                    container_name, container_id, container_status = \
                        line.split('##')
                    container_status = container_status.split()[0]
                    docker_facts[item[0] + '_filtered'].append({
                        'name': container_name,
                        'id': container_id,
                        'status': container_status
                    })

    results = dict(
        ansible_facts=dict(
            docker=docker_facts
        )
    )

    module.exit_json(**results)


if __name__ == '__main__':
    main()

88
library/haproxy_conf.py Normal file
View File

@ -0,0 +1,88 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: haproxy_conf
short_description: Gather the HAProxy config
description:
- Gather the HAProxy config
options:
path:
required: true
description:
- file path to the config file
type: str
author: "Tomas Sedovic"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Gather the HAProxy config
haproxy_conf: path=/etc/haproxy/haproxy.cfg
'''
# ConfigParser chokes on both mariadb and haproxy files. Luckily They have
# a syntax approaching ini config file so they are relatively easy to parse.
# This generic ini style config parser is not perfect -- it can ignore some
# valid options -- but good enough for our use case.
def generic_ini_style_conf_parser(file_path, section_regex, option_regex):
    """Parse an ini-style config file into a nested dict.

    :param file_path: path of the file to parse
    :param section_regex: regex matching a section header; group 1 is the
        section name
    :param option_regex: regex matching an option line; groups 1 and 2 are
        the option name and value
    :returns: dict mapping section name -> {option name: value}
    """
    config = {}
    current_section = None
    with open(file_path) as config_file:
        for line in config_file:
            match_section = re.match(section_regex, line)
            if match_section:
                current_section = match_section.group(1)
                config[current_section] = {}
            match_option = re.match(option_regex, line)
            if match_option and current_section:
                # Collapse whitespace runs inside multi-word option names.
                # (raw string fixes the invalid '\s' escape sequence)
                option = re.sub(r'\s+', ' ', match_option.group(1))
                config[current_section][option] = match_option.group(2)
    return config


def parse_haproxy_conf(file_path):
    """Parse an haproxy.cfg file and return it as a nested dict."""
    # BUG FIX: use raw strings -- '\w' and '\s' in plain literals are
    # invalid escape sequences (DeprecationWarning on Python 3.6+).
    section_regex = r'^(\w+)'
    option_regex = r'^(?:\s+)(\w+(?:\s+\w+)*?)\s+([\w/]*)$'
    return generic_ini_style_conf_parser(file_path, section_regex,
                                         option_regex)
def main():
    """Entry point: expose the parsed haproxy config as an Ansible fact."""
    argument_spec = dict(
        path=dict(required=True, type='str'),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    conf_path = module.params.get('path')
    try:
        parsed = parse_haproxy_conf(conf_path)
    except IOError:
        module.fail_json(msg="Could not open the haproxy conf file at: '%s'" %
                         conf_path)
    module.exit_json(changed=False, ansible_facts={u'haproxy_conf': parsed})


if __name__ == '__main__':
    main()

63
library/hiera.py Normal file
View File

@ -0,0 +1,63 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: hiera
short_description: Get data from hiera
description:
- Get data from hiera
options:
name:
required: true
description:
- Name to lookup
type: str
author: "Martin Andre (@mandre)"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Lookup foo
hiera: name=foo
'''
def main():
    """Entry point: look up a hiera key and expose it as an Ansible fact."""
    argument_spec = dict(
        name=dict(required=True, type='str'),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    key = module.params.get('name')
    lookup = ['/usr/bin/hiera', '-c', '/etc/puppet/hiera.yaml', key]
    value = subprocess.check_output(lookup, universal_newlines=True).rstrip()
    # hiera prints the literal string 'nil' when the key is not found.
    if value == 'nil':
        module.fail_json(msg="Failed to retrieve hiera data for {}"
                         .format(key))
    module.exit_json(changed=False,
                     ansible_facts={key: value})


if __name__ == '__main__':
    main()

61
library/icmp_ping.py Normal file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: icmp_ping
short_description: ICMP ping remote hosts
requirements: [ ping ]
description:
- Check host connectivity with ICMP ping.
options:
host:
required: true
description:
- IP address or hostname of host to ping
type: str
author: "Martin Andre (@mandre)"
'''
# Usage example; the original referenced the `icmp`/`ping` modules
# instead of this module's actual name, `icmp_ping`.
EXAMPLES = '''
# Ping host:
- hosts: webservers
  tasks:
    - name: Check Internet connectivity
      icmp_ping: host="www.ansible.com"
'''
def main():
    """Entry point: ping the given host once and report the result."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
        )
    )

    target = module.params.pop('host')
    rc, stdout, stderr = module.run_command('ping -c 1 {}'.format(target))
    # ping exits non-zero when the host did not answer.
    failed = rc != 0
    msg = stdout if stdout else stderr
    module.exit_json(changed=False, failed=failed, msg=msg)


if __name__ == '__main__':
    main()

156
library/ini.py Normal file
View File

@ -0,0 +1,156 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ansible module to read a value from an Ini file.
# Usage:
# - ini: path=/path/to/file.ini section=default key=something
# register: my_ini
#
# This will read the `path/to/file.ini` file and read the `Hello!` value under:
# [default]
# something = Hello!
#
# You can register the result and use it later with `{{ my_ini.value }}`
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from enum import Enum
import os
from ansible.module_utils.basic import AnsibleModule
# Possible return values
class ReturnValue(Enum):
    # Lookup succeeded and a value was found.
    OK = 0
    # The file could not be parsed as INI at all.
    INVALID_FORMAT = 1
    # The file parsed, but the section/key was not present.
    KEY_NOT_FOUND = 2
def check_file(path, ignore_missing):
    '''Return '' when *path* is an existing regular file, else an error.

    NOTE(review): *ignore_missing* is unused here; the caller decides how
    to react to the returned message -- confirm before removing it.
    '''
    if os.path.exists(path) and os.path.isfile(path):
        return ''
    return "Could not open the ini file: '{}'".format(path)
def get_result(path, section, key):
    '''Read ``key`` from ``section`` of the INI file at ``path``.

    :returns: a ``(ReturnValue, message, value)`` tuple; ``value`` is
        None unless the lookup succeeded.
    '''
    msg = ''
    value = None
    # SafeConfigParser was deprecated in Python 3.2 and removed in 3.12;
    # fall back to ConfigParser (its direct replacement) when the alias
    # is gone. On Python 2 the six-style import keeps SafeConfigParser.
    parser_cls = getattr(ConfigParser, 'SafeConfigParser',
                         ConfigParser.ConfigParser)
    config = parser_cls()
    try:
        config.read(path)
    except Exception:
        msg = "The file '{}' is not in a valid INI format.".format(path)
        ret = ReturnValue.INVALID_FORMAT
        return (ret, msg, value)
    try:
        value = config.get(section, key)
        msg = ("The key '{}' under the section '{}' in file {} "
               "has the value: '{}'").format(key, section, path, value)
        ret = ReturnValue.OK
        return (ret, msg, value)
    except ConfigParser.Error:
        # Covers both NoSectionError and NoOptionError.
        value = None
        msg = "There is no key '{}' under the section '{}' in file {}.".format(
            key, section, path)
        ret = ReturnValue.KEY_NOT_FOUND
        return (ret, msg, value)
DOCUMENTATION = '''
---
module: ini
short_description: Get data from an ini file
description:
- Get data from an ini file
options:
path:
required: true
description:
- File path
type: str
section:
required: true
description:
- Section to look up
type: str
key:
required: true
description:
- Section key to look up
type: str
ignore_missing_file:
required: false
description:
- Flag if a missing file should be ignored
type: bool
author: "Tomas Sedovic"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Lookup bar value
ini: path=config.ini section=foo key=bar ignore_missing_file=True
'''
def main():
    """Entry point for the ``ini`` module: read one value from an INI file."""
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
        section=dict(required=True, type='str'),
        key=dict(required=True, type='str'),
        ignore_missing_file=dict(required=False, type='bool'),
    ))
    ini_path = module.params.get('path')
    ignore_missing = module.params.get('ignore_missing_file')

    error = check_file(ini_path, ignore_missing)
    if error:
        # The file is missing or unreadable: either a soft pass or a hard
        # failure, depending on ignore_missing_file.
        if ignore_missing:
            module.exit_json(msg=error, changed=False, value=None)
        else:
            module.fail_json(msg=error)
    else:
        ret, msg, value = get_result(ini_path,
                                     module.params.get('section'),
                                     module.params.get('key'))
        if ret == ReturnValue.INVALID_FORMAT:
            module.fail_json(msg=msg)
        elif ret == ReturnValue.KEY_NOT_FOUND:
            module.exit_json(msg=msg, changed=False, value=None)
        elif ret == ReturnValue.OK:
            module.exit_json(msg=msg, changed=False, value=value)


if __name__ == '__main__':
    main()

130
library/ip_range.py Normal file
View File

@ -0,0 +1,130 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netaddr
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: ip_range
short_description: Check the size of an IP range
description:
- Check if the size of an IP range against a minimum value.
options:
start:
required: true
description:
- Start IP
type: str
end:
required: true
description:
- End IP
type: str
min_size:
required: true
description:
- Minum size of the range
type: int
author: "Tomas Sedovic"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Check the IP range
ip_range:
start: 192.0.2.5
end: 192.0.2.24
min_size: 15
'''
def check_arguments(start, end, min_size):
    '''Validate format of arguments.

    :returns: a list of error strings; empty when everything is well
        formed.
    '''
    errors = []
    startIP = None
    endIP = None
    # Check format of arguments
    try:
        startIP = netaddr.IPAddress(start)
    except netaddr.core.AddrFormatError:
        errors.append('Argument start ({}) must be an IP'.format(start))
    try:
        endIP = netaddr.IPAddress(end)
    except netaddr.core.AddrFormatError:
        errors.append('Argument end ({}) must be an IP'.format(end))
    # Only compare the bounds when both addresses parsed successfully.
    if not errors:
        if startIP.version != endIP.version:
            errors.append("Arguments start, end must share the same IP "
                          "version")
        if startIP > endIP:
            errors.append("Lower IP bound ({}) must be smaller than upper "
                          "bound ({})".format(startIP, endIP))
    # BUG FIX: the size check does not depend on the IP addresses, so
    # report it even when one of the addresses is malformed (previously
    # it was skipped in that case).
    if min_size < 0:
        errors.append('Argument min_size({}) must be greater than 0'
                      .format(min_size))
    return errors
def check_IP_range(start, end, min_size):
    '''Report whether the start..end range holds at least min_size IPs.'''
    ip_range = netaddr.IPRange(start, end)
    range_len = len(ip_range)
    if range_len >= min_size:
        return []
    # Undersized ranges produce a two-line warning-style error message.
    return [
        'The IP range {} - {} contains {} addresses.'.format(
            start, end, range_len),
        'This might not be enough for the deployment or later scaling.'
    ]
def main():
    """Entry point: validate the IP range arguments, then its size."""
    module = AnsibleModule(argument_spec=dict(
        start=dict(required=True, type='str'),
        end=dict(required=True, type='str'),
        min_size=dict(required=True, type='int'),
    ))
    params = module.params
    start = params.get('start')
    end = params.get('end')
    min_size = params.get('min_size')

    # Argument-format errors take precedence over range-size errors.
    errors = check_arguments(start, end, min_size)
    if not errors:
        errors = check_IP_range(start, end, min_size)
    if errors:
        module.fail_json(msg='\n'.join(errors))
    else:
        module.exit_json(msg='success')


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,533 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import netaddr
import os.path
import yaml
import six
from ansible.module_utils.basic import AnsibleModule
# from os_net_config import validator
from tripleo_validations.utils import get_nested
DOCUMENTATION = '''
---
module: network_environment
short_description: Validate networking templates
description:
- Performs networking-related checks on a set of TripleO templates
options:
netenv_path:
required: true
description:
- The path of the base network environment file
type: str
plan_env_path:
required: true
description:
- The path of the plan environment file
type: str
ip_pools_path:
required: true
description:
- The path of the IP pools network environment file
type: str
template_files:
required: true
description:
- A list of template files and contents
type: list
author: "Tomas Sedovic, Martin André, Florian Fuchs"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Check the Network environment
network_environment:
netenv_path: environments/network-environment.yaml
template_files: "{{ lookup('tht') }}"
plan_env_path: plan-environment.yaml
ip_pools_path: environments/ips-from-pool-all.yaml
'''
def open_network_environment_files(netenv_path, template_files):
    """Load the network environment and every NIC config it references.

    :param netenv_path: key of the network environment file in
        ``template_files``
    :param template_files: mapping of file path -> raw file contents
    :returns: ``(network_data, nic_configs, errors)`` where
        ``nic_configs`` is a list of (resource name, path, parsed YAML)
        tuples and ``errors`` a list of strings
    """
    errors = []
    try:
        network_data = yaml.safe_load(template_files[netenv_path])
    except Exception as e:
        # Without the environment file nothing else can be checked.
        return ({}, {}, ["Can't open network environment file '{}': {}"
                         .format(netenv_path, e)])
    nic_configs = []
    resource_registry = network_data.get('resource_registry', {})
    for nic_name, relative_path in six.iteritems(resource_registry):
        if nic_name.endswith("Net::SoftwareConfig"):
            # NIC config paths are relative to the environment file.
            nic_config_path = os.path.normpath(
                os.path.join(os.path.dirname(netenv_path), relative_path))
            try:
                nic_configs.append((
                    nic_name, nic_config_path,
                    yaml.safe_load(template_files[nic_config_path])))
            except Exception as e:
                errors.append(
                    "Can't open the resource '{}' reference file '{}': {}"
                    .format(nic_name, nic_config_path, e))
    return (network_data, nic_configs, errors)
def validate(netenv_path, template_files):
    """Run every network-environment check and return the error list."""
    network_data, nic_configs, errors = open_network_environment_files(
        netenv_path, template_files)
    all_errors = list(errors)
    all_errors.extend(validate_network_environment(network_data, nic_configs))
    return all_errors
def validate_network_environment(network_data, nic_configs):
    """Validate the network environment and its referenced NIC configs.

    :returns: a list of error strings (empty when everything checks out)
    """
    errors = []
    cidrinfo = {}
    poolsinfo = {}
    vlaninfo = {}
    staticipinfo = {}
    # Bucket parameter_defaults entries by their conventional name suffixes.
    for item, data in six.iteritems(network_data.get('parameter_defaults',
                                                     {})):
        if item.endswith('NetCidr'):
            cidrinfo[item] = data
        elif item.endswith('AllocationPools'):
            poolsinfo[item] = data
        elif item.endswith('NetworkVlanID'):
            vlaninfo[item] = data
        elif item.endswith('IPs'):
            staticipinfo[item] = data
    # Structural checks per NIC config file.
    for nic_config_name, nic_config_path, nic_config in nic_configs:
        errors.extend(check_nic_configs(nic_config_path, nic_config))
    # Cross-cutting consistency checks over the collected parameters.
    errors.extend(check_cidr_overlap(cidrinfo.values()))
    errors.extend(
        check_allocation_pools_pairing(
            network_data.get('parameter_defaults', {}), poolsinfo))
    errors.extend(check_static_ip_pool_collision(staticipinfo, poolsinfo))
    errors.extend(check_vlan_ids(vlaninfo))
    errors.extend(check_static_ip_in_cidr(cidrinfo, staticipinfo))
    errors.extend(duplicate_static_ips(staticipinfo))
    return errors
def check_nic_configs(path, nic_data):
    """Validate the bridge/bond/interface layout of one NIC config file.

    :param path: path of the NIC config file (used in error messages)
    :param nic_data: parsed YAML content of that file
    :returns: a list of error strings
    """
    errors = []
    if not isinstance(nic_data, collections.Mapping):
        return ["The nic_data parameter must be a dictionary."]

    # Look though every resources bridges and make sure there is only a single
    # bond per bridge and only 1 interface per bridge if there are no bonds.
    resources = nic_data.get('resources')
    if not isinstance(resources, collections.Mapping):
        return ["The nic_data must contain the 'resources' key and it must be "
                "a dictionary."]
    for name, resource in six.iteritems(resources):
        try:
            nested_path = [
                ('properties', collections.Mapping, 'dictionary'),
                ('config', collections.Mapping, 'dictionary'),
                ('network_config', collections.Iterable, 'list'),
            ]
            bridges = get_nested(resource, name, nested_path)
        except ValueError as e:
            errors.append('{}'.format(e))
            continue
        # Not all resources contain a network config:
        if not bridges:
            continue

        # TODO(flfuchs) 2018-11-22: Rocky introduced a couple of
        # template changes using a schema that cant't be found in
        # os-net-config's schema.yaml file yet, so the validator fails
        # even though the templates are working. Until this is done, we
        # skip the schema validation.
        # Validate the os_net_config object against the schema.
        # v_errors = validator.validate_config(bridges, path)
        # errors.extend(v_errors)
        # if len(v_errors) > 0:
        #     continue

        # If we get here, the nic config file conforms to the schema and
        # there is no more need to check for existence and type of
        # properties.
        for bridge in bridges:
            if bridge['type'] == 'ovs_bridge':
                bond_count = 0
                interface_count = 0
                for bridge_member in bridge['members']:
                    if bridge_member['type'] in ('ovs_bond', 'ovs_dpdk_bond'):
                        bond_count += 1
                    elif bridge_member['type'] == 'interface':
                        interface_count += 1
                    else:
                        pass

                if bond_count >= 2:
                    errors.append(
                        'Invalid bonding: There are >= 2 bonds for'
                        ' bridge {} of resource {} in {}'.format(
                            bridge['name'], name, path))
                if bond_count == 0 and interface_count > 1:
                    errors.append(
                        'Invalid interface: When not using a bond, '
                        'there can only be 1 interface for bridge {} '
                        'of resource {} in {}'.format(
                            bridge['name'], name, path))
                if bond_count == 0 and interface_count == 0:
                    # BUG FIX: added the missing space between "bridge {}"
                    # and "of resource" in the concatenated message.
                    errors.append(
                        'Invalid config: There must be at least '
                        '1 interface or 1 bond for bridge {} '
                        'of resource {} in {}'.format(
                            bridge['name'], name, path))
                # check if the bridge has name br-int
                if bridge['name'] == 'br-int':
                    errors.append(
                        'br-int bridge name is reserved for '
                        'integration bridge')
    return errors
def check_cidr_overlap(networks):
    """Flag invalid CIDRs and pairs of networks whose ranges overlap."""
    if not isinstance(networks, collections.Iterable):
        return ["The argument must be iterable."]
    errors = []
    parsed = []
    # First pass: parse every CIDR, collecting parse failures.
    for cidr in networks:
        try:
            parsed.append(netaddr.IPNetwork(cidr))
        except (ValueError, TypeError):
            errors.append('Invalid network: {}'.format(cidr))
    # Second pass: compare every distinct pair exactly once.
    for net_a, net_b in itertools.combinations(parsed, 2):
        if net_a in net_b or net_b in net_a:
            errors.append(
                'Networks {} and {} overlap.'
                .format(net_a, net_b))
    return errors
def check_allocation_pools_pairing(filedata, pools):
    """Check that each allocation pool fits its corresponding *NetCidr.

    For every ``<Name>AllocationPools`` entry, the matching
    ``<Name>NetCidr`` parameter must exist, each range must parse, lie
    inside that subnet, and not overlap another range of the same pool.
    Returns a list of error strings.
    """
    if not isinstance(filedata, collections.Mapping):
        return ["The `filedata` argument must be a dictionary."]
    if not isinstance(pools, collections.Mapping):
        return ["The `pools` argument must be a dictionary."]
    errors = []
    for poolitem, pooldata in six.iteritems(pools):
        pool_objs = []
        if not isinstance(pooldata, collections.Iterable):
            errors.append('The IP ranges in {} must form a list.'
                          .format(poolitem))
            continue
        # Check IP range format
        for dict_range in pooldata:
            try:
                pool_objs.append(netaddr.IPRange(
                    netaddr.IPAddress(dict_range['start']),
                    netaddr.IPAddress(dict_range['end'])))
            except Exception:
                errors.append("Invalid format of the IP range in {}: {}"
                              .format(poolitem, dict_range))
                continue
        # Check if CIDR is specified and IP network is valid
        subnet_item = poolitem.split('AllocationPools')[0] + 'NetCidr'
        try:
            network = filedata[subnet_item]
            subnet_obj = netaddr.IPNetwork(network)
        except KeyError:
            errors.append('The {} CIDR is not specified for {}.'
                          .format(subnet_item, poolitem))
            continue
        except Exception:
            errors.append('Invalid IP network: {}'.format(network))
            continue
        for range in pool_objs:
            # Check if pool is included in subnet
            if range not in subnet_obj:
                # One out-of-subnet range fails the whole pool item.
                errors.append('Allocation pool {} {} outside of subnet'
                              ' {}: {}'.format(poolitem,
                                               pooldata,
                                               subnet_item,
                                               subnet_obj))
                break
            # Check for overlapping pools
            for other in [r for r in pool_objs if r != range]:
                if range.first in other or range.last in other:
                    errors.append('Some pools in {} are overlapping.'.format(
                        poolitem))
                    break
    return errors
def check_static_ip_pool_collision(static_ips, pools):
    """Statically defined IP address must not conflict with allocation pools.
    The allocation pools come as a dict of items in the following format:
    InternalApiAllocationPools: [
        {'start': '10.35.191.150', 'end': '10.35.191.240'}
    ]
    The static IP addresses are dicts of:
    ComputeIPs: {
        'internal_api': ['10.35.191.100', etc.],
        'storage': ['192.168.100.45', etc.]
    }
    """
    if not isinstance(static_ips, collections.Mapping):
        return ["The static IPs input must be a dictionary."]
    if not isinstance(pools, collections.Mapping):
        return ["The Pools input must be a dictionary."]
    errors = []
    pool_ranges = []
    # Pre-parse every allocation pool into (name, IPRange) pairs.
    for pool_name, ranges in six.iteritems(pools):
        if not isinstance(ranges, collections.Iterable):
            errors.append("The IP ranges in {} must form a list."
                          .format(pool_name))
            continue
        for allocation_range in ranges:
            try:
                ip_range = netaddr.IPRange(allocation_range['start'],
                                           allocation_range['end'])
            except Exception:
                errors.append("Invalid format of the IP range in {}: {}"
                              .format(pool_name, allocation_range))
                continue
            pool_ranges.append((pool_name, ip_range))
    # Test each static IP against every parsed pool range.
    for role, services in six.iteritems(static_ips):
        if not isinstance(services, collections.Mapping):
            errors.append("The {} must be a dictionary.".format(role))
            continue
        for service, ips in six.iteritems(services):
            if not isinstance(ips, collections.Iterable):
                errors.append("The {}->{} must be an array."
                              .format(role, service))
                continue
            for ip in ips:
                try:
                    ip = netaddr.IPAddress(ip)
                except netaddr.AddrFormatError as e:
                    errors.append("{} is not a valid IP address: {}"
                                  .format(ip, e))
                    continue
                # Report every conflicting pool, not just the first one.
                ranges_with_conflict = ranges_conflicting_with_ip(
                    ip, pool_ranges)
                if ranges_with_conflict:
                    for pool_name, ip_range in ranges_with_conflict:
                        msg = "IP address {} from {}[{}] is in the {} pool."
                        errors.append(msg.format(
                            ip, role, service, pool_name))
    return errors
def ranges_conflicting_with_ip(ip_address, ip_ranges):
    """Return every (pool_name, ip_range) pair that contains ip_address.

    ``ip_ranges`` is a list of ``(pool_name, netaddr.IPRange)`` tuples.
    Listing all conflicting ranges (rather than just the first) lets the
    caller produce a complete error report.
    """
    conflicts = []
    for pool_name, ip_range in ip_ranges:
        if ip_address in ip_range:
            conflicts.append((pool_name, ip_range))
    return conflicts
def check_vlan_ids(vlans):
    """Detect VLAN IDs that are assigned to more than one network."""
    if not isinstance(vlans, collections.Mapping):
        return ["The vlans parameter must be a dictionary."]
    errors = []
    # Map each VLAN ID to the first parameter name that claimed it.
    first_owner = {}
    for param, vlan_id in six.iteritems(vlans):
        if vlan_id in first_owner:
            errors.append('Vlan ID {} ({}) already exists in {}'.format(
                vlan_id, param, first_owner[vlan_id]))
        else:
            first_owner[vlan_id] = param
    return errors
def check_static_ip_in_cidr(networks, static_ips):
    """Check all static IP addresses are from the corresponding network range.
    """
    if not isinstance(networks, collections.Mapping):
        return ["The networks argument must be a dictionary."]
    if not isinstance(static_ips, collections.Mapping):
        return ["The static_ips argument must be a dictionary."]
    errors = []
    network_ranges = {}
    # TODO(shadower): Refactor this so networks are always valid and already
    # converted to `netaddr.IPNetwork` here. Will be useful in the other
    # checks.
    for name, cidr in six.iteritems(networks):
        try:
            network_ranges[name] = netaddr.IPNetwork(cidr)
        except Exception:
            errors.append("Network '{}' has an invalid CIDR: '{}'"
                          .format(name, cidr))
    for role, services in six.iteritems(static_ips):
        if not isinstance(services, collections.Mapping):
            errors.append("The {} must be a dictionary.".format(role))
            continue
        for service, ips in six.iteritems(services):
            # A service name like 'internal_api' maps to the
            # 'InternalApiNetCidr' parameter name.
            range_name = service.title().replace('_', '') + 'NetCidr'
            if range_name in network_ranges:
                if not isinstance(ips, collections.Iterable):
                    errors.append("The {}->{} must be a list."
                                  .format(role, service))
                    continue
                for ip in ips:
                    if ip not in network_ranges[range_name]:
                        errors.append(
                            "The IP address {} is outside of the {} range: {}"
                            .format(ip, range_name, networks[range_name]))
            else:
                errors.append(
                    "Service '{}' does not have a "
                    "corresponding range: '{}'.".format(service, range_name))
    return errors
def duplicate_static_ips(static_ips):
    """Report static IP addresses assigned to more than one role/service."""
    if not isinstance(static_ips, collections.Mapping):
        return ["The static_ips argument must be a dictionary."]
    errors = []
    # Map each IP to every (role, service) pair that claims it.
    ip_owners = collections.defaultdict(list)
    # TODO(shadower): we're doing this netsted loop multiple times. Turn it
    # into a generator or something.
    for role, services in six.iteritems(static_ips):
        if not isinstance(services, collections.Mapping):
            errors.append("The {} must be a dictionary.".format(role))
            continue
        for service, ips in six.iteritems(services):
            if not isinstance(ips, collections.Iterable):
                errors.append("The {}->{} must be a list."
                              .format(role, service))
                continue
            for ip in ips:
                ip_owners[ip].append((role, service))
    for ip, owners in six.iteritems(ip_owners):
        if len(owners) > 1:
            msg = "The {} IP address was entered multiple times: {}."
            formatted_sources = ("{}[{}]"
                                 .format(*owner) for owner in owners)
            errors.append(msg.format(ip, ", ".join(formatted_sources)))
    return errors
def validate_node_pool_size(plan_env_path, ip_pools_path, template_files):
    """Warn when a role's static IP pools hold fewer IPs than its node count.

    Reads the node counts from the plan environment and the static IP
    pools from the IP pools environment file; returns a list of warning
    strings.
    """
    warnings = []
    plan_env = yaml.safe_load(template_files[plan_env_path])
    ip_pools = yaml.safe_load(template_files[ip_pools_path])
    param_defaults = plan_env.get('parameter_defaults')
    # Roles with at least one node, keyed by role name ('Count' stripped).
    node_counts = {
        param.replace('Count', ''): count
        for param, count in six.iteritems(param_defaults)
        if param.endswith('Count') and count > 0
    }
    # TODO(akrivoka): There are a lot of inconsistency issues with parameter
    # naming in THT :( Once those issues are fixed, this block should be
    # removed.
    if 'ObjectStorage' in node_counts:
        node_counts['SwiftStorage'] = node_counts['ObjectStorage']
        del node_counts['ObjectStorage']
    param_defaults = ip_pools.get('parameter_defaults')
    # Static IP pools, restricted to the roles that actually have nodes.
    role_pools = {
        param.replace('IPs', ''): pool
        for param, pool in six.iteritems(param_defaults)
        if param.endswith('IPs') and param.replace('IPs', '') in node_counts
    }
    for role, node_count in six.iteritems(node_counts):
        try:
            pools = role_pools[role]
        except KeyError:
            warnings.append(
                "Found {} node(s) assigned to '{}' role, but no static IP "
                "pools defined.".format(node_count, role)
            )
            continue
        for pool_name, pool_ips in six.iteritems(pools):
            if len(pool_ips) < node_count:
                warnings.append(
                    "Insufficient number of IPs in '{}' pool for '{}' role: "
                    "{} IP(s) found in pool, but {} nodes assigned to role."
                    .format(pool_name, role, len(pool_ips), node_count)
                )
    return warnings
def main():
    """Entry point: run all template checks and report errors/warnings."""
    module = AnsibleModule(argument_spec=dict(
        netenv_path=dict(required=True, type='str'),
        plan_env_path=dict(required=True, type='str'),
        ip_pools_path=dict(required=True, type='str'),
        template_files=dict(required=True, type='list')
    ))
    netenv_path = module.params.get('netenv_path')
    plan_env_path = module.params.get('plan_env_path')
    ip_pools_path = module.params.get('ip_pools_path')
    # template_files arrives as (name, content-tuple) pairs; keep the
    # second element of each content tuple (the file body).
    template_files = {}
    for name, content in module.params.get('template_files'):
        template_files[name] = content[1]

    errors = validate(netenv_path, template_files)
    warnings = []
    # Pool-size problems are warnings; any exception while computing them
    # (e.g. unreadable plan/pool files) is demoted to a regular error.
    try:
        warnings = validate_node_pool_size(plan_env_path, ip_pools_path,
                                           template_files)
    except Exception as e:
        errors.append("{}".format(e))

    if errors:
        module.fail_json(msg="\n".join(errors))
    else:
        module.exit_json(
            msg="No errors found for the '{}' file.".format(netenv_path),
            warnings=warnings,
        )


if __name__ == '__main__':
    main()

158
library/node_disks.py Normal file
View File

@ -0,0 +1,158 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule # noqa
DOCUMENTATION = '''
---
module: node_disks
short_description: Check disks, flavors and root device hints
description:
- Check if each node has a root device hint set if there is more
than one disk and compare flavors to disk sizes.
options:
nodes:
required: true
description:
- A list of nodes
type: list
flavors:
required: true
description:
- A list of flavors
type: list
introspection_data:
required: true
description:
- Introspection data for all nodes
type: list
author: "Florian Fuchs <flfuchs@redhat.com>"
'''
EXAMPLES = '''
- hosts: undercloud
tasks:
- name: Check node disks
node_disks:
nodes: "{{ lookup('ironic_nodes') }}"
flavors: "{{ lookup('nova_flavors') }}"
introspection_data: "{{ lookup('introspection_data',
auth_url=auth_url.value, password=password.value) }}"
'''
# Disks whose reported size is at or below this threshold (4 GiB, in
# bytes) are considered unusable for deployment and are ignored.
IGNORE_BYTE_MAX = 4294967296
# Error message templates; both take (node name, flavor name).
ONE_DISK_TOO_SMALL_ERROR = """\
The node {} only has one disk and it's too small for the "{}" flavor"""
NO_RDH_SMALLEST_DISK_TOO_SMALL_ERROR = (
    '{} has more than one disk available for deployment and no '
    'root device hints set. The disk that will be used is too small '
    'for the flavor with the largest disk requirement ("{}").')
def _get_minimum_disk_size(flavors):
min_gb = 0
name = 'n.a.'
for key, val in flavors.items():
disk_gb = val['disk']
if disk_gb > min_gb:
min_gb = disk_gb
name = key
# convert GB to bytes to compare to introspection data
return name, min_gb * 1073741824
def _get_smallest_disk(disks):
smallest = disks[0]
for disk in disks[1:]:
if disk['size'] < smallest['size']:
smallest = disk
return smallest
def _has_root_device_hints(node_name, node_data):
rdh = node_data.get(
node_name, {}).get('properties', {}).get('root_device')
return rdh is not None
def validate_node_disks(nodes, flavors, introspection_data):
    """Validate root device hints using introspection data.

    :param nodes: Ironic nodes, keyed by node name
    :param flavors: flavor dicts, keyed by flavor name
    :param introspection_data: Introspection data for all nodes
    :returns warnings: List of warning messages
             errors: List of error messages
    """
    errors = []
    warnings = []
    # Get the name of the flavor with the largest disk requirement,
    # which defines the minimum disk size.
    max_disk_flavor, min_disk_size = _get_minimum_disk_size(flavors)
    for node, content in introspection_data.items():
        # BUG FIX: guard against missing inventory ('disks' may be absent
        # or None), which previously raised a TypeError here.
        disks = content.get('inventory', {}).get('disks') or []
        valid_disks = [disk for disk in disks
                       if disk['size'] > IGNORE_BYTE_MAX]
        if not valid_disks:
            # BUG FIX: previously _get_smallest_disk([]) raised IndexError
            # and aborted the whole validation for every node.
            errors.append('{} has no disk larger than {} bytes available '
                          'for deployment.'.format(node, IGNORE_BYTE_MAX))
            continue
        root_device_hints = _has_root_device_hints(node, nodes)
        smallest_disk = _get_smallest_disk(valid_disks)
        if len(valid_disks) == 1:
            if smallest_disk.get('size', 0) < min_disk_size:
                errors.append(ONE_DISK_TOO_SMALL_ERROR.format(
                    node, max_disk_flavor))
        elif not root_device_hints:
            # More than one disk but no root device hints set.
            if smallest_disk.get('size', 0) < min_disk_size:
                errors.append(NO_RDH_SMALLEST_DISK_TOO_SMALL_ERROR.format(
                    node, max_disk_flavor))
            else:
                warnings.append('{} has more than one disk available for '
                                'deployment'.format(node))
    return errors, warnings
def main():
    """Entry point: gather parameters and run the node disk validation."""
    module = AnsibleModule(argument_spec=dict(
        nodes=dict(required=True, type='list'),
        flavors=dict(required=True, type='dict'),
        introspection_data=dict(required=True, type='list')
    ))
    # Index nodes by name; introspection data arrives as (name, content)
    # pairs and is turned into a dict keyed by node name.
    nodes = {}
    for node in module.params.get('nodes'):
        nodes[node['name']] = node
    introspection_data = dict(module.params.get('introspection_data'))

    errors, warnings = validate_node_disks(nodes,
                                           module.params.get('flavors'),
                                           introspection_data)
    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(msg="Root device hints are either set or not "
                             "necessary.")


if __name__ == '__main__':
    main()

78
library/overcloudrc.py Normal file
View File

@ -0,0 +1,78 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import AnsibleModule
import os.path
import subprocess
DOCUMENTATION = '''
---
module: overcloudrc
short_description: Source the overcloudrc file
description:
- Source the overcloudrc file
options:
path:
required: true
description:
- The file path
type: str
author: "Tomas Sedovic"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Source overcloudrc
overcloudrc: path=/home/stack/overcloudrc
'''
def main():
    """Source an overcloudrc file and expose its OS_* vars as facts.

    Fails when the file does not exist or cannot be sourced; otherwise
    returns the OS_* environment variables under the 'overcloudrc' fact.
    """
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
    ))
    overcloudrc_path = os.path.expanduser(module.params.get('path'))

    if not os.path.isfile(overcloudrc_path):
        module.fail_json(
            msg="The overcloudrc file at {} does not exist.".format(
                overcloudrc_path))

    # Use bash to source overcloudrc and print the environment:
    command = ['bash', '-c', 'source ' + overcloudrc_path + ' && env']
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True
    )
    # BUG FIX: communicate() drains both pipes while waiting. The previous
    # wait()-then-read pattern can deadlock when a pipe buffer fills up
    # before the child exits.
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        msg = "Could not source '{}'. Return code: {}.\nSTDERR:\n{}".format(
            overcloudrc_path, proc.returncode, stderr)
        module.fail_json(msg=msg)

    facts = {}
    for line in stdout.splitlines():
        (key, _, value) = line.partition("=")
        # Only the OpenStack (OS_*) variables are of interest.
        if key.startswith("OS_"):
            facts[key] = value.rstrip()

    module.exit_json(changed=False, ansible_facts={'overcloudrc': facts})


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,137 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: OVS DPDK PMD CPU's check
short_description: Run PMD CPU's from all the NUMA nodes check
description:
- Run PMD CPU's from all the NUMA nodes check
options:
pmd_cpu_mask:
required: true
description:
- The pmd cpu mask value
type: str
author: "Jaganathan Palanisamy"
'''
EXAMPLES = '''
- hosts: ComputeOvsDpdk
vars:
pmd_cpu_mask: "1010010000000001"
tasks:
- name: Run PMD CPU's check
become: true
ovs_dpdk_pmd_cpus_check: pmd_cpu_mask={{ pmad_cpu_mask }}
'''
def get_cpus_list_from_mask_value(mask_val):
    """Gets CPU's list from the mask value

    :return: comma separated CPU's list
    """
    # The mask may arrive wrapped in (escaped) quotes; strip them first.
    cleaned = mask_val.strip('\\"')
    bits = bin(int(cleaned, 16))[2:]
    # Walk the bits from least significant to most significant: each set
    # bit marks the CPU id equal to its position.
    return ','.join(str(pos)
                    for pos, bit in enumerate(reversed(bits))
                    if bit == '1')
# Gets the distinct numa nodes, physical and logical cpus info
# for all numa nodes.
def get_nodes_cores_info(module):
    """Parse `lscpu -p` output into NUMA node and CPU topology data.

    :param module: the AnsibleModule instance (used to run the command
        and to fail the play on parse errors)
    :returns: ``(numa_nodes, cpus)`` where ``cpus`` is a list of dicts
        with 'cpu', 'numa_node' and 'thread_siblings' keys
    """
    dict_cpus = {}
    numa_nodes = []
    cmd = "sudo lscpu -p=NODE,CORE,CPU"
    result = module.run_command(cmd)
    # run_command returns (rc, stdout, stderr); empty stdout is an error.
    if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))):
        err = "Unable to determine physical and logical cpus."
        module.fail_json(msg=err)
    else:
        for line in str(result[1]).split('\n'):
            # Skip blank lines and the '#'-prefixed header lines.
            if (line.strip(' ') and not line.strip(' ').startswith('#')):
                cpu_info = line.strip(' ').split(',')
                try:
                    node = int(cpu_info[0])
                    cpu = int(cpu_info[1])
                    thread = int(cpu_info[2])
                    if node not in numa_nodes:
                        numa_nodes.append(node)
                    # CPU and NUMA node together forms a unique value,
                    # as cpu is specific to a NUMA node
                    # NUMA node id and cpu id tuple is used for unique key
                    key = node, cpu
                    if key in dict_cpus:
                        if thread not in dict_cpus[key]['thread_siblings']:
                            dict_cpus[key]['thread_siblings'].append(thread)
                    else:
                        cpu_item = {}
                        cpu_item['thread_siblings'] = [thread]
                        cpu_item['cpu'] = cpu
                        cpu_item['numa_node'] = node
                        dict_cpus[key] = cpu_item
                except (IndexError, ValueError):
                    err = "Unable to determine physical and logical cpus."
                    module.fail_json(msg=err)
    return (numa_nodes, list(dict_cpus.values()))
def validate_pmd_cpus(module, pmd_cpu_mask):
    """Check that the PMD CPU mask uses CPUs from every NUMA node."""
    pmd_cpu_list = get_cpus_list_from_mask_value(pmd_cpu_mask).split(',')
    numa_nodes, cpus = get_nodes_cores_info(module)

    # A NUMA node is covered when at least one PMD CPU appears among the
    # thread siblings of one of that node's cores.
    covered = {}
    for numa_node in numa_nodes:
        covered[str(numa_node)] = False
    for numa_node in numa_nodes:
        for cpu in cpus:
            if cpu['numa_node'] != numa_node:
                continue
            if any(int(pmd_cpu) in cpu['thread_siblings']
                   for pmd_cpu in pmd_cpu_list):
                covered[str(numa_node)] = True

    uncovered = [node for node, ok in covered.items() if not ok]
    if uncovered:
        err = ("Invalid PMD CPU's, cpu is not used from "
               "NUMA node(s): %(node)s." % {'node': ','.join(uncovered)})
        module.fail_json(msg=err)
    else:
        module.exit_json(msg="PMD CPU's configured correctly.")
def main():
    """Entry point: read the PMD CPU mask parameter and validate it."""
    module = AnsibleModule(argument_spec=dict(
        pmd_cpu_mask=dict(required=True, type='str'),
    ))
    validate_pmd_cpus(module, module.params.get('pmd_cpu_mask'))


if __name__ == '__main__':
    main()

84
library/pacemaker.py Normal file
View File

@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: pacemaker
short_description: Return status from a pacemaker status XML
description:
- Return status from a pacemaker status XML
options:
status:
required: true
description:
- pacemaker status XML
type: str
author: "Tomas Sedovic"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Get pacemaker status
become: true
command: pcs status xml
register: pcs_status
- name: Check pacemaker status
pacemaker: status="{{ pcs_status.stdout }}"
'''
def parse_pcs_status(pcs_status_xml):
    """Parse a pacemaker status XML document and collect its failures.

    :param pcs_status_xml: the XML text produced by ``pcs status xml``
    :returns: dict with a 'failures' key holding the list of
        ``<failure>`` elements found under ``<failures>``
    """
    tree = ElementTree.fromstring(pcs_status_xml)
    return {'failures': tree.findall('failures/failure')}
def format_failure(failure):
    """Render one pacemaker ``<failure>`` element as a readable sentence.

    :param failure: an XML element exposing the task/op_key/node/
        exitreason/exitstatus attributes via ``get``
    """
    attrs = {key: failure.get(key)
             for key in ('task', 'op_key', 'node',
                         'exitreason', 'exitstatus')}
    return ("Task {task} {op_key} failed on node {node}. Exit reason: "
            "'{exitreason}'. Exit status: '{exitstatus}'.".format(**attrs))
def main():
    """Check a pacemaker status XML document for failed actions."""
    module = AnsibleModule(argument_spec=dict(
        status=dict(required=True, type='str'),
    ))
    failures = parse_pcs_status(module.params.get('status'))['failures']
    if failures:
        details = '\n'.join(format_failure(failure) for failure in failures)
        msg = ("The pacemaker status contains some failed actions:\n"
               + details)
    else:
        msg = "The pacemaker status reports no errors."
    module.exit_json(
        failed=bool(failures),
        msg=msg,
    )


if __name__ == '__main__':
    main()

225
library/switch_vlans.py Normal file
View File

@ -0,0 +1,225 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os.path
import yaml
import six
from ansible.module_utils.basic import AnsibleModule # noqa
from tripleo_validations import utils
DOCUMENTATION = '''
---
module: switch_vlans
short_description: Check configured VLANs against Ironic introspection data
description:
- Validate that the VLANs defined in TripleO nic config files are in the
LLDP info received from network switches. The LLDP data is stored in
Ironic introspection data per interface.
options:
path:
required: true
description:
- The path of the base network environment file
type: str
template_files:
required: true
description:
- A list of template files and contents
type: list
introspection_data:
required: true
description:
- Introspection data for all nodes
type: list
author: "Bob Fournier"
'''
EXAMPLES = '''
- hosts: undercloud
tasks:
- name: Check that switch vlans are present if used in nic-config files
network_environment:
path: environments/network-environment.yaml
template_files: "{{ lookup('tht') }}"
introspection_data: "{{ lookup('introspection_data',
auth_url=auth_url.value, password=password.value) }}"
'''
def open_network_environment_files(netenv_path, template_files):
    """Load the network environment file and its nic-config resources.

    :param netenv_path: key (path) of the base network environment file
        inside template_files
    :param template_files: mapping of template path -> file contents
    :returns: tuple (network_data, nic_configs, errors) where nic_configs
        is a list of (resource name, resolved path, parsed YAML) tuples
    """
    errors = []
    try:
        network_data = yaml.safe_load(template_files[netenv_path])
    except Exception as e:
        return ({}, {}, ["Can't open network environment file '{}': {}"
                         .format(netenv_path, e)])
    nic_configs = []
    base_dir = os.path.dirname(netenv_path)
    registry = network_data.get('resource_registry', {})
    for nic_name, relative_path in six.iteritems(registry):
        if not nic_name.endswith("Net::SoftwareConfig"):
            continue
        # Resolve the nic-config path relative to the environment file.
        nic_config_path = os.path.normpath(
            os.path.join(base_dir, relative_path))
        try:
            parsed = yaml.safe_load(template_files[nic_config_path])
        except Exception as e:
            errors.append(
                "Can't open the resource '{}' reference file '{}': {}"
                .format(nic_name, nic_config_path, e))
            continue
        nic_configs.append((nic_name, nic_config_path, parsed))
    return (network_data, nic_configs, errors)
def validate_switch_vlans(netenv_path, template_files, introspection_data):
    """Check that every VLAN used in the nic-config files exists in the
    LLDP data gathered during Ironic introspection.

    :param netenv_path: path to network_environment file
    :param template_files: template files being checked
    :param introspection_data: introspection data for all nodes
    :returns warnings: List of warning messages
             errors: List of error messages
    """
    network_data, nic_configs, errors =\
        open_network_environment_files(netenv_path, template_files)
    warnings = []
    vlans_in_templates = False

    # Store VLAN IDs from network-environment.yaml.
    vlaninfo = {}
    for item, data in six.iteritems(network_data.get('parameter_defaults',
                                                     {})):
        if item.endswith('NetworkVlanID'):
            vlaninfo[item] = data

    # Get the VLANs which are actually used in nic configs
    for nic_config_name, nic_config_path, nic_config in nic_configs:
        # Validate the document shape before dereferencing it: calling
        # .get() on a non-mapping (e.g. a YAML list) would raise
        # AttributeError before the isinstance check could ever run.
        if not isinstance(nic_config, collections.Mapping):
            return [], ["nic_config parameter must be a dictionary."]

        resources = nic_config.get('resources')
        if not isinstance(resources, collections.Mapping):
            return [], ["The nic_data must contain the 'resources' key "
                        "and it must be a dictionary."]

        for name, resource in six.iteritems(resources):
            try:
                nested_path = [
                    ('properties', collections.Mapping, 'dictionary'),
                    ('config', collections.Mapping, 'dictionary'),
                    ('network_config', collections.Iterable, 'list'),
                ]
                nw_config = utils.get_nested(resource, name, nested_path)
            except ValueError as e:
                errors.append('{}'.format(e))
                continue
            # Not all resources contain a network config:
            if not nw_config:
                continue

            for elem in nw_config:
                # VLANs will be in bridge members
                if elem['type'] in ('ovs_bridge', 'linux_bridge'):
                    for member in elem['members']:
                        if member['type'] != 'vlan':
                            continue
                        vlans_in_templates = True
                        vlan_id_str = member['vlan_id']
                        # Resolve the {get_param: ...} reference against
                        # the *NetworkVlanID parameter defaults.
                        vlan_id = vlaninfo[vlan_id_str['get_param']]
                        msg, result = vlan_exists_on_switch(
                            vlan_id, introspection_data)
                        warnings.extend(msg)
                        # Only flag an error when LLDP data was present
                        # (no warning) and the VLAN was not found.
                        if not msg and result is False:
                            errors.append(
                                "VLAN ID {} not on attached switch".format(
                                    vlan_id))
    if not vlans_in_templates:
        warnings.append("No VLANs are used on templates files")
    return warnings, errors
def vlan_exists_on_switch(vlan_id, introspection_data):
    """Look for a VLAN id in the introspected LLDP data of all nodes.

    :param vlan_id: VLAN id to search for
    :param introspection_data: mapping of node name -> introspection data
    :returns msg: list holding at most one warning message
             result: boolean indicating if VLAN was found
    """
    for node_name, node_data in introspection_data.items():
        seen_lldp = False
        interfaces = node_data.get('all_interfaces', [])
        # Check lldp data on all interfaces for this vlan ID
        for iface_name in interfaces:
            processed = interfaces[iface_name].get('lldp_processed', {})
            if not processed:
                continue
            seen_lldp = True
            port_vlans = processed.get('switch_port_vlans', [])
            if any(entry['id'] == vlan_id for entry in port_vlans):
                return [], True
        # Without any LLDP data for this node we cannot locate the vlan,
        # so return a warning instead of a hard result.
        if not seen_lldp:
            uuid = node_name.split("-", 1)[1]
            return (["LLDP data not available for node {}".format(uuid)],
                    False)
    return [], False  # could not find VLAN ID
def main():
    """Ansible module entry point for the switch VLAN validation."""
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='str'),
        template_files=dict(required=True, type='list'),
        introspection_data=dict(required=True, type='list')
    ))
    netenv_path = module.params.get('path')
    # template_files arrives as (name, (headers, contents)) pairs; keep
    # only the contents. introspection_data arrives as (name, data) pairs.
    template_files = {name: content[1]
                      for name, content in
                      module.params.get('template_files')}
    introspection_data = dict(module.params.get('introspection_data'))

    warnings, errors = validate_switch_vlans(netenv_path, template_files,
                                             introspection_data)
    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(msg="All VLANs configured on attached switches")


if __name__ == '__main__':
    main()

167
library/verify_profiles.py Normal file
View File

@ -0,0 +1,167 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule # noqa
DOCUMENTATION = '''
---
module: verify_profiles
short_description: Check that profiles have enough nodes
description:
- Validate that the profiles assigned have enough nodes available.
options:
nodes:
required: true
description:
- A list of nodes
type: list
flavors:
required: true
description:
- A dictionary of flavors
type: dict
author: "Brad P. Crochet"
'''
EXAMPLES = '''
- hosts: undercloud
tasks:
- name: Collect the flavors
check_flavors:
roles_info: "{{ lookup('roles_info', wantlist=True) }}"
flavors: "{{ lookup('nova_flavors', wantlist=True) }}"
register: flavor_result
- name: Check the profiles
verify_profiles:
nodes: "{{ lookup('ironic_nodes', wantlist=True) }}"
flavors: flavor_result.flavors
'''
def _capabilities_to_dict(caps):
"""Convert the Node's capabilities into a dictionary."""
if not caps:
return {}
if isinstance(caps, dict):
return caps
return dict([key.split(':', 1) for key in caps.split(',')])
def _node_get_capabilities(node):
    """Return the parsed capabilities dict of an Ironic node."""
    raw_caps = node['properties'].get('capabilities')
    return _capabilities_to_dict(raw_caps)
def verify_profiles(nodes, flavors):
    """Check that each flavor's profile is backed by enough Ironic nodes.

    Nodes are consumed greedily in flavor-dict iteration order: once a
    node is assigned to one flavor's profile it is removed from the free
    pool and cannot satisfy another flavor.

    :param nodes: list of node dicts (must carry 'uuid',
        'provision_state' and 'properties')
    :param flavors: dict mapping flavor name -> (flavor dict, scale)
    :returns warnings: List of warning messages
             errors: List of error messages
    """
    errors = []
    warnings = []
    # Only nodes that can actually host a deployment are considered.
    bm_nodes = {node['uuid']: node for node in nodes
                if node['provision_state'] in ('available', 'active')}
    # Pool of not-yet-assigned nodes; entries are popped as flavors
    # claim them below.
    free_node_caps = {uu: _node_get_capabilities(node)
                      for uu, node in bm_nodes.items()}
    # Becomes True as soon as any in-use flavor passes the profile check;
    # gates the "nodes with no profile" warning at the end.
    profile_flavor_used = False
    for flavor_name, (flavor, scale) in flavors.items():
        # Flavors with a zero/absent scale are not deployed; skip them.
        if not scale:
            continue
        profile = None
        keys = flavor.get('keys')
        if keys:
            profile = keys.get('capabilities:profile')
        # A missing profile is only an error when several flavors exist;
        # with a single flavor the profile is not needed to disambiguate.
        if not profile and len(flavors) > 1:
            message = ('Error: The {flavor} flavor has no profile '
                       'associated.\n'
                       'Recommendation: assign a profile with openstack '
                       'flavor set --property '
                       '"capabilities:profile"="PROFILE_NAME" {flavor}')
            errors.append(message.format(flavor=flavor_name))
            continue
        profile_flavor_used = True
        assigned_nodes = [uu for uu, caps in free_node_caps.items()
                          if caps.get('profile') == profile]
        required_count = scale - len(assigned_nodes)
        # More matching nodes than requested: surplus nodes are unused.
        if required_count < 0:
            warnings.append('%d nodes with profile %s won\'t be used '
                            'for deployment now' % (-required_count,
                                                    profile))
            required_count = 0
        # Claim the matched nodes so later flavors cannot reuse them.
        for uu in assigned_nodes:
            free_node_caps.pop(uu)
        if required_count > 0:
            message = ('Error: only {total} of {scale} requested ironic '
                       'nodes are tagged to profile {profile} (for flavor '
                       '{flavor}).\n'
                       'Recommendation: tag more nodes using openstack '
                       'baremetal node set --property "capabilities='
                       'profile:{profile}" <NODE ID>')
            errors.append(message.format(total=scale - required_count,
                                         scale=scale,
                                         profile=profile,
                                         flavor=flavor_name))
    nodes_without_profile = [uu for uu, caps in free_node_caps.items()
                             if not caps.get('profile')]
    if nodes_without_profile and profile_flavor_used:
        warnings.append("There are %d ironic nodes with no profile that "
                        "will not be used: %s" % (
                            len(nodes_without_profile),
                            ', '.join(nodes_without_profile)))
    return warnings, errors
def main():
    """Entry point: validate that profiles have enough matching nodes."""
    module = AnsibleModule(argument_spec=dict(
        nodes=dict(required=True, type='list'),
        flavors=dict(required=True, type='dict')
    ))
    warnings, errors = verify_profiles(module.params.get('nodes'),
                                       module.params.get('flavors'))
    if errors:
        module.fail_json(msg="\n".join(errors))
    elif warnings:
        module.exit_json(warnings="\n".join(warnings))
    else:
        module.exit_json(msg="No profile errors detected.")


if __name__ == '__main__':
    main()

54
library/warn.py Normal file
View File

@ -0,0 +1,54 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: warn
short_description: Add warning to playbook output
description:
- Add warning to playbook output
options:
msg:
required: true
description:
- The warning text
type: str
author: "Martin Andre (@mandre)"
'''
EXAMPLES = '''
- hosts: webservers
tasks:
- name: Output warning message
warn: msg="Warning!"
'''
def main():
    """Emit the given message as an Ansible warning."""
    module = AnsibleModule(argument_spec=dict(
        msg=dict(required=True, type='str'),
    ))
    warning_text = module.params.get('msg')
    module.exit_json(changed=False, warnings=[warning_text])


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,78 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.plugins.lookup import LookupBase
from glanceclient.exc import HTTPNotFound
from tripleo_validations import utils
DOCUMENTATION = """
lookup: glance_images
description: Retrieve image information from Glance
long_description:
- Load image information using the Glance API and search by attribute.
options:
_terms:
description: Optional filter attribute and filter value
author: Brad P. Crochet <brad@redhat.com>
"""
EXAMPLES = """
- name: Get all image ids from glance
debug:
msg: |
{{ lookup('glance_images', wantlist=True) |
map(attribute='id') | join(', ') }}
- name: Get image with name 'overcloud-full'
debug:
msg: |
{{ lookup('glance_images', 'name', ['overcloud-full'],
wantlist=True) | map(attribute='name') }}"
"""
RETURN = """
_raw:
description: A Python list with results from the API call.
"""
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Return image information from Glance.

        With no terms every image is returned; with ('name', [names])
        images are looked up by name, silently skipping missing ones.
        """
        glance = utils.get_glance_client(variables)
        if not terms:
            return list(glance.images.list())
        images = []
        # Look up images by name
        if terms[0] == 'name':
            for value in terms[1]:
                try:
                    images.extend(
                        glance.images.list(filters={terms[0]: value}))
                except HTTPNotFound:
                    pass
        return images

View File

@ -0,0 +1,52 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.plugins.lookup import LookupBase
from ironic_inspector_client import ClientError
from ironic_inspector_client import ClientV1
from ironicclient import client
from tripleo_validations.utils import get_auth_session
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns Ironic Inspector introspection data.

        Access swift and return introspection data for all nodes.

        :returns a list of tuples, one for each node.
        """
        session = get_auth_session({
            'auth_url': kwargs.get('auth_url'),
            'password': kwargs.get('password'),
            'username': 'ironic',
            'project_name': 'service',
        })
        ironic = client.get_client(1, session=session)
        inspector = ClientV1(session=session)

        results = []
        for node in ironic.node.list():
            try:
                data = inspector.get_data(node.uuid)
            except ClientError:
                # Nodes without stored introspection data are skipped.
                continue
            results.append((node.name, data))
        return results

View File

@ -0,0 +1,101 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DOCUMENTATION = """
lookup: ironic_nodes
description: Retrieve node information from Ironic
long_description:
- Load node information using the Ironic API
options:
_terms:
description: Optional filter attribute and filter value
author: Florian Fuchs <flfuchs@redhat.com>
"""
EXAMPLES = """
- name: Get all nodes from Ironic
debug:
msg: "{{ lookup('ironic_nodes', wantlist=True) }}"
- name: Lookup all nodes that match a list of IDs
debug:
msg: |
{{ lookup('ironic_nodes', 'id',
['c8a1c7b8-d6b1-408b-b4a6-5881efdfd65c',
'4bea536d-9d37-432f-a77e-7c65f1cf3acb'],
wantlist=True) }}"
- name: Get all nodes for a set of instance UUIDs
debug:
msg: |
{{ lookup('ironic_nodes', 'instance_uuid',
['1691a1c7-9974-4bcc-a07a-5dec7fc04da0',
'07f2435d-820c-46ce-9097-cf8a7282293e'],
wantlist=True) }}"
- name: Get all nodes marked as 'associated'
debug:
msg: |
{{ lookup('ironic_nodes', 'associated',
wantlist=True) }}"
- name: Get nodes in provision state, and not associated or in maintenance
debug:
msg: |
{{ lookup('ironic_nodes', 'provision_state',
['available', 'inspect'], wantlist=True)}}
"""
RETURN = """
_raw:
description: A Python list with results from the API call.
"""
from ansible.plugins.lookup import LookupBase
from tripleo_validations import utils
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Return node information from Ironic, optionally filtered.

        Supported filters: 'id', 'instance_uuid', 'associated',
        'provision_state'; with no terms all nodes are returned.
        """
        ironic = utils.get_ironic_client(variables)
        if not terms:
            return [utils.filtered(node)
                    for node in ironic.node.list(detail=True)]
        key = terms[0]
        if key == 'id':
            nodes = [ironic.node.get(node_id) for node_id in terms[1]]
        elif key == 'instance_uuid':
            nodes = [ironic.node.get_by_instance_uuid(uuid)
                     for uuid in terms[1]]
        elif key == 'associated':
            nodes = ironic.node.list(associated=True, detail=True)
        elif key == 'provision_state':
            nodes = []
            for state in terms[1]:
                nodes.extend(ironic.node.list(
                    provision_state=state,
                    associated=False,
                    maintenance=False,
                    detail=True))
        else:
            # Unknown filter attribute: fall through with no result,
            # matching the original implementation's implicit None.
            return None
        return [utils.filtered(node) for node in nodes]

View File

@ -0,0 +1,59 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.plugins.lookup import LookupBase
from tripleo_validations import utils
DOCUMENTATION = """
lookup: nova_flavors
description: Retrieve flavor information from Nova
long_description:
- Load flavor information using the Nova API.
author: Brad P. Crochet <brad@redhat.com>
"""
EXAMPLES = """
- name: Get all flavors from nova
debug:
msg: |
{{ lookup('nova_flavors') }}
"""
RETURN = """
_raw:
description: A Python list with results from the API call.
"""
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Return flavor information from nova, keyed by flavor name."""
        nova = utils.get_nova_client(variables)
        flavors = {}
        for flavor in nova.flavors.list():
            flavors[flavor.name] = {
                'name': flavor.name,
                'id': flavor.id,
                'disk': flavor.disk,
                'ram': flavor.ram,
                'vcpus': flavor.vcpus,
                'ephemeral': flavor.ephemeral,
                'swap': flavor.swap,
                'is_public': flavor.is_public,
                'rxtx_factor': flavor.rxtx_factor,
                'keys': flavor.get_keys(),
            }
        return flavors

View File

@ -0,0 +1,50 @@
#!/usr/bin/env python
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.plugins.lookup import LookupBase
from tripleo_validations import utils
DOCUMENTATION = """
lookup: nova_hypervisor_statistics
description: Retrieve hypervisor statistic information from Nova
long_description:
- Load hypervisor statistics using the Nova API.
author: Brad P. Crochet <brad@redhat.com>
"""
EXAMPLES = """
- name: Get all hypervisor statistics from nova
debug:
msg: |
{{ lookup('nova_hypervisor_statistics') }}
"""
RETURN = """
_raw:
description: A Python list with results from the API call.
"""
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Return hypervisor statistics from nova."""
        nova = utils.get_nova_client(variables)
        stats = nova.hypervisor_stats.statistics()
        # Only keep attributes whose values can be serialized.
        return utils.filtered(stats)

View File

@ -0,0 +1,91 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DOCUMENTATION = """
lookup: nova_servers
description: Retrieve server information from Nova
long_description:
- Load server information using the Nova API and search by attribute.
options:
_terms:
description: Optional filter attribute and filter value
author: Florian Fuchs <flfuchs@redhat.com>
"""
EXAMPLES = """
- name: Get all server ids from nova
debug:
msg: |
{{ lookup('nova_servers', wantlist=True) |
map(attribute='id') | join(', ') }}
- name: Lookup all server ids from nova with a certain ctlplane IP
debug:
msg: |
{{ lookup('nova_servers', 'ip', 'ctlplane', ['192.168.24.15'],
wantlist=True) | map(attribute='id') | join(', ') }}"
- name: Get server with name 'overcloud-controller-0'
debug:
msg: |
{{ lookup('nova_servers', 'name', ['overcloud-controller-0'],
wantlist=True) | map(attribute='name') }}"
"""
RETURN = """
_raw:
description: A Python list with results from the API call.
"""
from ansible.plugins.lookup import LookupBase
from novaclient.exceptions import NotFound
from tripleo_validations import utils
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Return server information from nova.

        Supports lookup by network IP ('ip', network, [ips]) or by any
        server attribute (attr, [values]); with no terms all servers
        are returned.
        """
        nova = utils.get_nova_client(variables)
        if not terms:
            found = nova.servers.list()
        elif terms[0] == 'ip':
            # Look up servers by network and IP
            found = []
            for address in terms[2]:
                try:
                    found.append(nova.servers.find(
                        networks={terms[1]: [address]}))
                except NotFound:
                    pass
        else:
            # Look up servers by attribute
            found = []
            for value in terms[1]:
                try:
                    found.append(nova.servers.find(**{terms[0]: value}))
                except NotFound:
                    pass
        # For each server only return properties whose value
        # can be properly serialized. (Things like
        # novaclient.v2.servers.ServerManager will make
        # Ansible return the whole result as a string.)
        return [utils.filtered(server) for server in found]

View File

@ -0,0 +1,83 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from ansible.plugins.lookup import LookupBase
from tripleo_validations import utils
DOCUMENTATION = """
lookup: roles_info
description: Retrieve role information from Heat and Swift.
long_description:
- Load role information using the Heat API.
options:
_terms:
description: Optional filter attribute and filter value
author: Brad P. Crochet <brad@redhat.com>
"""
EXAMPLES = """
- name: Get all role info from Heat and Swift
debug:
msg: |
{{ lookup('roles_info', wantlist=True) }}
"""
RETURN = """
_raw:
description: A Python list with results from the API call.
"""
class LookupModule(LookupBase):

    def _get_object_yaml(self, swiftclient, container, obj):
        """Fetch a Swift object and parse its body as YAML."""
        headers_and_body = swiftclient.get_object(container=container,
                                                  obj=obj)
        return yaml.safe_load(headers_and_body[1])

    def run(self, terms, variables=None, **kwargs):
        """Return role information from the deployment plan in Swift."""
        swift = utils.get_swift_client(variables)
        plan = variables.get('plan')
        plan_env = self._get_object_yaml(swift, plan, 'plan-environment.yaml')
        roles_data = self._get_object_yaml(swift, plan, 'roles_data.yaml')

        overrides = plan_env.get('parameter_defaults', {})
        roles = []
        for role_data in roles_data:
            name = role_data['name']
            role = {
                'name': name,
                'count': role_data.get('CountDefault', 0),
                'flavor': None,
            }
            # Plan parameter defaults take precedence over the
            # roles_data defaults.
            count_override = overrides.get("%sCount" % name)
            if count_override:
                role['count'] = count_override
            flavor_override = overrides.get("Overcloud%sFlavor" % name)
            if flavor_override:
                role['flavor'] = flavor_override
            roles.append(role)
        return roles

View File

@ -0,0 +1,48 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.plugins.lookup import LookupBase
from tripleo_validations import utils
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns the current plan's stack resources.

        :return: A list of dicts
        """
        heat = utils.get_heat_client(variables)
        resources = heat.resources.list(variables['plan'])
        wanted = ('resource_name', 'resource_status',
                  'logical_resource_id', 'links', 'creation_time',
                  'resource_status_reason', 'updated_time',
                  'required_by', 'physical_resource_id', 'resource_type')
        return [{attr: getattr(res, attr) for attr in wanted}
                for res in resources]

47
lookup_plugins/tht.py Normal file
View File

@ -0,0 +1,47 @@
#!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ansible.plugins.lookup import LookupBase
from tripleo_validations import utils
EXCLUDED_EXT = (
'.pyc',
'.pyo',
)
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Returns the current plan files.

        Returns a list of tuples, one for each plan file,
        containing the template path and the template content
        (as returned by swift get_object: a (headers, body) pair).
        """
        ret = []
        swift = utils.get_swift_client(variables)
        container = swift.get_container(variables['plan'])
        for item in container[1]:
            # Filter by extension BEFORE downloading: the original
            # fetched every object from Swift and only then discarded
            # the excluded ones, wasting a round-trip per skipped file.
            if os.path.splitext(item['name'])[-1] in EXCLUDED_EXT:
                continue
            obj = swift.get_object(variables['plan'], item['name'])
            ret.append((item['name'], obj))
        return ret

View File

@ -0,0 +1,15 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify undercloud fits the CPU core requirements
description: >
Make sure that the undercloud has enough CPU cores.
https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux_OpenStack_Platform/7/html/Director_Installation_and_Usage/sect-Undercloud_Requirements.html
groups:
- prep
- pre-introspection
min_undercloud_cpu_count: 8
roles:
- undercloud-cpu

View File

@ -0,0 +1,15 @@
---
- hosts: undercloud
vars:
metadata:
name: Undercloud Services Debug Check
description: >
The undercloud's openstack services should _not_ have debug enabled.
This will check if debug is enabled on undercloud services.
        If debug is enabled, the root filesystem can fill up quickly,
        which is not a good thing.
groups:
- pre-deployment
debug_check: "True"
roles:
- undercloud-debug

View File

@ -0,0 +1,20 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify undercloud fits the disk space requirements to perform an upgrade
description: >
Make sure that the root partition on the undercloud node has enough
free space before starting an upgrade
http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements
groups:
- pre-upgrade
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var, min_size: 16}
- {mount: /, min_size: 20}
roles:
- undercloud-disk-space

View File

@ -0,0 +1,23 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify undercloud fits the disk space requirements
description: >
Make sure that the root partition on the undercloud node has enough
free space.
http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements
groups:
- prep
- pre-introspection
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var/log, min_size: 3}
- {mount: /usr, min_size: 5}
- {mount: /var, min_size: 20}
- {mount: /, min_size: 25}
roles:
- undercloud-disk-space

View File

@ -0,0 +1,15 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify heat-manage purge_deleted is enabled in crontab
description: >
Without a purge_deleted crontab enabled, the
heat database can grow very large. This validation checks that
the purge_deleted crontab has been set up.
groups:
- pre-upgrade
- pre-deployment
cron_check: "heat-manage purge_deleted"
roles:
- undercloud-heat-purge-deleted

View File

@ -0,0 +1,31 @@
---
- hosts: undercloud
vars:
metadata:
name: Undercloud Neutron Sanity Check
description: >
Run `neutron-sanity-check` on the undercloud node to find out
potential issues with Neutron's configuration.
The tool expects all the configuration files that are passed
to the Neutron services.
groups:
- pre-introspection
# The list of Neutron configuration files and directories that
# will be passed to the Neutron services. The order is important
# here: the values in later files take precedence.
configs:
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/plugins/ml2/openvswitch_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
- /usr/share/neutron/neutron-lbaas-dist.conf
- /etc/neutron/lbaas_agent.ini
roles:
- undercloud-neutron-sanity-check

View File

@ -0,0 +1,15 @@
---
- hosts: undercloud
vars:
metadata:
name: Check the number of OpenStack processes on undercloud
description: >
The default settings for OpenStack is to run one process (heat-engine,
keystone, etc.) per CPU core. On a machine with a lot of cores this is
both unnecessary and can consume a significant amount of RAM, leading
to crashes due to OOMKiller.
groups:
- pre-deployment
max_process_count: 8
roles:
- undercloud-process-count

View File

@ -0,0 +1,16 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify the undercloud fits the RAM requirements
description: >
Verify that the undercloud has enough RAM.
https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/14/html/director_installation_and_usage/planning-your-undercloud#determining-environment-scale
groups:
- prep
- pre-introspection
- pre-upgrade
min_undercloud_ram_gb: 24
roles:
- undercloud-ram

View File

@ -0,0 +1,12 @@
---
- hosts: undercloud
vars:
metadata:
name: Undercloud SELinux Enforcing Mode Check
description: >
Check if the Undercloud is running SELinux in Enforcing mode.
groups:
- prep
- pre-introspection
roles:
- undercloud-selinux-mode

View File

@ -0,0 +1,12 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify undercloud services state before running update or upgrade
description: >
Check undercloud status before running a stack update - especially minor update and major upgrade.
groups:
- post-upgrade
- pre-upgrade
roles:
- undercloud-service-status

View File

@ -0,0 +1,14 @@
---
- hosts: undercloud
vars:
metadata:
name: Verify token_flush is enabled in keystone users crontab
description: >
Without a token_flush crontab enabled for the keystone user, the
keystone database can grow very large. This validation checks that
the keystone token_flush crontab has been set up.
groups:
- pre-introspection
cron_check: "keystone-manage token_flush"
roles:
- undercloud-tokenflush

View File

@ -0,0 +1,36 @@
Undercloud-cpu
==============
An Ansible role to check if the Undercloud fits the CPU core requirements
Requirements
------------
This role could be used before or/and after the Undercloud installation.
Role Variables
--------------
- min_undercloud_cpu_count: <8> -- Minimum number of CPU cores
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-cpu, min_undercloud_cpu_count: 42 }
License
-------
Apache 2.0
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,3 @@
---
min_undercloud_cpu_count: 8

View File

@ -0,0 +1,28 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []

View File

@ -0,0 +1,7 @@
---
- name: Verify the number of CPU cores
fail:
msg: >-
There are {{ ansible_processor_vcpus }} cores in the system,
but there should be at least {{ min_undercloud_cpu_count }}
failed_when: "ansible_processor_vcpus|int < min_undercloud_cpu_count|int"

View File

@ -0,0 +1,10 @@
---
metadata:
name: Verify undercloud fits the CPU core requirements
description: >
Make sure that the undercloud has enough CPU cores.
https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux_OpenStack_Platform/7/html/Director_Installation_and_Usage/sect-Undercloud_Requirements.html
groups:
- prep
- pre-introspection

View File

@ -0,0 +1,38 @@
Undercloud-debug
================
An Ansible role to check if debug is enabled on Undercloud services.
Requirements
------------
This role needs to be run against an installed Undercloud.
Role Variables
--------------
- debug_check: <True>
- services_conf_files: List of paths to the configuration file of each
service you want to check
Dependencies
------------
- 'ini' custom plugin
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-debug }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,9 @@
---
debug_check: True
services_conf_files:
- /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf
- /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf
- /var/lib/config-data/puppet-generated/ceilometer/etc/ceilometer/ceilometer.conf
- /var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf
- /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf

View File

@ -0,0 +1,27 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []

View File

@ -0,0 +1,11 @@
---
- name: Check the services for debug flag
become: True
ini:
path: "{{ item }}"
section: DEFAULT
key: debug
ignore_missing_file: True
register: config_result
with_items: "{{ services_conf_files }}"
failed_when: "debug_check|bool == config_result.value|bool"

View File

@ -0,0 +1,10 @@
---
metadata:
name: Undercloud Services Debug Check
description: >
The undercloud's openstack services should _not_ have debug enabled.
This will check if debug is enabled on undercloud services.
If debug is enabled, the root filesystem can fill up quickly, which
is not a good thing.
groups:
- pre-deployment

View File

@ -0,0 +1,36 @@
Undercloud-disk-space
=====================
An Ansible role to verify if the Undercloud fits the disk space requirements.
Requirements
------------
This role could be used before or/and after the Undercloud installation.
Role Variables
--------------
- volumes: a list of mount points and their minimum sizes (in GB)
Dependencies
------------
No Dependencies
Example Playbook
----------------
- hosts: servers
roles:
- { role: undercloud-disk-space}
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,9 @@
---
volumes:
- {mount: /var/lib/docker, min_size: 10}
- {mount: /var/lib/config-data, min_size: 3}
- {mount: /var/log, min_size: 3}
- {mount: /usr, min_size: 5}
- {mount: /var, min_size: 20}
- {mount: /, min_size: 25}

View File

@ -0,0 +1,28 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []

View File

@ -0,0 +1,39 @@
---
- name: Set a constant defining number of Bytes in 1 GB
set_fact:
const_bytes_in_gb: 1073741824
- name: Stat volume directories
stat:
path: "{{ item.mount }}"
with_items: "{{ volumes }}"
register: volumes_stat
- name: Initialize existing_volumes to an empty array
set_fact:
existing_volumes="{{ [] }}"
- name: Filter out non-existing volumes
set_fact:
existing_volumes: "{{ existing_volumes +[item.item] }}"
with_items: "{{ volumes_stat.results }}"
when: item.stat.exists
loop_control:
label: "{{ item.item.mount }}"
- name: Loop on volumes and gather available space
shell: df -B1 {{ item.mount }} --output=avail | sed 1d
register: volume_size
with_items: "{{ existing_volumes }}"
changed_when: False
- name: Fail if any of the volumes are too small
fail:
msg: >
Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G
- current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G
when: >
item.stdout|int / const_bytes_in_gb|int < item.item.min_size|int
with_items: "{{ volume_size.results }}"
loop_control:
label: "{{ item.item.mount }}"

View File

@ -0,0 +1,11 @@
---
metadata:
name: Verify undercloud fits the disk space requirements
description: >
Make sure that the root partition on the undercloud node has enough
free space.
http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements
groups:
- prep
- pre-introspection

View File

@ -0,0 +1,38 @@
Undercloud-heat-purge-deleted
=============================
An Ansible role to check if `heat-manage purge_deleted` is enabled in the
crontab
Requirements
------------
This role requires an installed and working Undercloud.
Role Variables
--------------
- cron_check: <'heat-manage purge_deleted'> -- String to check in the crontab
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-heat-purge-deleted }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,3 @@
---
cron_check: "heat-manage purge_deleted"

View File

@ -0,0 +1,29 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies:
- role: validation-db-utils

View File

@ -0,0 +1,29 @@
---
- name: Get the path of tripleo undercloud config file
become: true
hiera: name="tripleo_undercloud_conf_file"
- name: Get the Container CLI from the undercloud.conf file
become: true
ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Get heat crontab
become: true
shell: |
set -o pipefail
{{ container_cli.value|default('podman') }} exec heat_api_cron crontab -l -u heat |grep -v '^#'
register: cron_result
changed_when: False
- name: Check heat crontab
fail:
msg: >-
heat-manage purge_deleted does not appear to be enabled via cron. You
should add '<desired interval> heat-manage purge_deleted' to the heat
users crontab.
when: "cron_result.stdout.find('heat-manage purge_deleted') == -1"

View File

@ -0,0 +1,10 @@
---
metadata:
name: Verify heat-manage purge_deleted is enabled in crontab
description: >
Without a purge_deleted crontab enabled, the
heat database can grow very large. This validation checks that
the purge_deleted crontab has been set up.
groups:
- pre-upgrade
- pre-deployment

View File

@ -0,0 +1,38 @@
Undercloud-neutron-sanity-check
===============================
An Ansible role to check for potential issues with Neutron's configuration
Requirements
------------
This role needs an installed and working Undercloud
Role Variables
--------------
- configs: A list of Neutron configuration files and directories that will be
passed to the Neutron services. The order is important here, the values in
later files take precedence.
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-neutron-sanity-check }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,12 @@
---
configs:
- /etc/neutron/neutron.conf
- /usr/share/neutron/neutron-dist.conf
- /etc/neutron/metadata_agent.ini
- /etc/neutron/dhcp_agent.ini
- /etc/neutron/plugins/ml2/openvswitch_agent.ini
- /etc/neutron/fwaas_driver.ini
- /etc/neutron/l3_agent.ini
- /usr/share/neutron/neutron-lbaas-dist.conf
- /etc/neutron/lbaas_agent.ini

View File

@ -0,0 +1,29 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies:
- role: validation-db-utils

View File

@ -0,0 +1,53 @@
---
- name: Get the path of tripleo undercloud config file
become: true
hiera: name="tripleo_undercloud_conf_file"
- name: Get the Container CLI from the undercloud.conf file
become: true
ini:
path: "{{ tripleo_undercloud_conf_file }}"
section: DEFAULT
key: container_cli
ignore_missing_file: true
register: container_cli
- name: Run neutron-sanity-check
command: >
{{ container_cli.value|default('podman') }}
exec -u root neutron_ovs_agent
/bin/bash -c 'neutron-sanity-check --config-file {{ item }}'
with_items: "{{ configs }}"
become: true
register: nsc_return
ignore_errors: true
changed_when: False
- name: Detect errors
set_fact:
has_errors: "{{ nsc_return.results
| sum(attribute='stderr_lines', start=[])
| select('search', '(ERROR)')
| list | length | int > 0 }}"
- name: Detect warnings
set_fact:
has_warnings: "{{ nsc_return.results
| sum(attribute='stderr_lines', start=[])
| select('search', '(WARNING)')
| list | length | int > 0 }}"
- name: Create output
set_fact:
output_msg: "{{ nsc_return.results
| sum(attribute='stderr_lines', start=[])
| select('search', '(ERROR|WARNING)')
| list }}"
- name: Output warning
warn: msg="{{ output_msg | join('\n') }}"
when: has_warnings and not has_errors
- name: Fail
fail: msg="{{ output_msg | join('\n') }}"
when: has_errors

View File

@ -0,0 +1,11 @@
---
metadata:
name: Undercloud Neutron Sanity Check
description: >
Run `neutron-sanity-check` on the undercloud node to find out
potential issues with Neutron's configuration.
The tool expects all the configuration files that are passed
to the Neutron services.
groups:
- pre-introspection

View File

@ -0,0 +1,37 @@
Undercloud-process-count
========================
An Ansible role to check the number of OpenStack processes on the Undercloud
Requirements
------------
This role requires an installed and working Undercloud
Role Variables
--------------
- max_process_count: <'8'> -- Maximum number of processes per service
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: servers
roles:
- { role: undercloud-process-count }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,3 @@
---
max_process_count: 8

View File

@ -0,0 +1,28 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies:
- role: validation-lib-utils

View File

@ -0,0 +1,50 @@
---
# Count the number of running processes for each OpenStack service on the
# undercloud and emit a warning for every service that exceeds
# max_process_count (too many workers risks OOMKiller crashes).
- name: Get the path of tripleo undercloud config file
  become: true
  hiera:
    name: "tripleo_undercloud_conf_file"

- name: Get the Container CLI from the undercloud.conf file
  become: true
  ini:
    path: "{{ tripleo_undercloud_conf_file }}"
    section: DEFAULT
    key: container_cli
    ignore_missing_file: true
  register: container_cli

- name: Collect the number of running processes per OpenStack service
  # pgrep -f matches against the full command line; -c prints the count.
  command: "{{ container_cli.value|default('podman') }} exec {{ item.container }} pgrep -f -c {{ item.proc }}"
  become: true
  # A missing container must not abort the validation; the count for that
  # service is simply skipped.
  ignore_errors: yes
  register: "process_count"
  changed_when: False
  loop:
    - {container: "heat_engine", proc: "heat-engine"}
    - {container: "ironic_inspector", proc: "ironic-inspector"}
    - {container: "ironic_conductor", proc: "ironic-conductor"}
    # NOTE: the process name is hyphenated ("nova-api"); the underscored
    # form would never match and the check would silently pass.
    - {container: "nova_api", proc: "nova-api"}
    - {container: "nova_scheduler", proc: "nova-scheduler"}
    - {container: "nova_conductor", proc: "nova-conductor"}
    - {container: "nova_compute", proc: "nova-compute"}
    - {container: "glance_api", proc: "glance-api"}
    - {container: "swift_proxy", proc: "swift-proxy-server"}
    - {container: "swift_object_server", proc: "swift-object-server"}
    - {container: "swift_container_server", proc: "swift-container-server"}
    - {container: "zaqar", proc: "zaqar"}
    - {container: "zaqar_websocket", proc: "zaqar-server"}
    - {container: "mistral_api", proc: "mistral-server"}
    - {container: "mistral_engine", proc: "mistral-server"}
    - {container: "mistral_executor", proc: "mistral-server"}

- name: Create warning messages
  # item.item is the {container, proc} loop dict from the task above; use
  # the process name rather than the raw dict in the user-facing message.
  command: echo "There are {{ item.stdout }} {{ item.item.proc }} processes running. Having more than {{ max_process_count }} risks running out of memory."
  register: process_warnings
  with_items: "{{ process_count.results }}"
  when: "item.stdout|int > max_process_count"

- name: Output warning message
  warn: msg={{ warning_msg }}
  when: "warning_msg|length > 0"
  vars:
    # Only the items whose 'when' fired (changed) produced a message.
    warning_msg: "{{ process_warnings.results|selectattr('changed')|map(attribute='stdout')|join('\n') }}"

View File

@ -0,0 +1,10 @@
---
metadata:
name: Check the number of OpenStack processes on undercloud
description: >
The default settings for OpenStack is to run one process (heat-engine,
keystone, etc.) per CPU core. On a machine with a lot of cores this is
both unnecessary and can consume a significant amount of RAM, leading
to crashes due to OOMKiller.
groups:
- pre-deployment

View File

@ -0,0 +1,36 @@
Undercloud-ram
==============
An Ansible role to check if the Undercloud fits the RAM requirements
Requirements
------------
This role could be used before or/and after the Undercloud installation
Role Variables
--------------
- min_undercloud_ram_gb: <24> -- Minimum amount of RAM, in GB
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-ram, min_undercloud_ram_gb: 24 }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,3 @@
---
min_undercloud_ram_gb: 24

View File

@ -0,0 +1,27 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []

View File

@ -0,0 +1,8 @@
---
- name: Verify the RAM requirements
fail:
msg: >-
The RAM on the undercloud node is {{ ansible_memtotal_mb }} MB,
the minimal recommended value is {{ min_undercloud_ram_gb|int * 1024 }} MB.
# NOTE(shadower): converting GB to MB
failed_when: "(ansible_memtotal_mb) < min_undercloud_ram_gb|int * 1024"

View File

@ -0,0 +1,11 @@
---
metadata:
name: Verify the undercloud fits the RAM requirements
description: >
Verify that the undercloud has enough RAM.
https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/14/html/director_installation_and_usage/planning-your-undercloud#determining-environment-scale
groups:
- prep
- pre-introspection
- pre-upgrade

View File

@ -0,0 +1,37 @@
Undercloud-selinux-mode
=======================
An Ansible role to check the Undercloud SELinux Enforcing mode
Requirements
------------
This role could be used before or/and after the Undercloud installation
Role Variables
--------------
None
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-selinux-mode }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1 @@
---

View File

@ -0,0 +1,28 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []

View File

@ -0,0 +1,24 @@
---
- name: Get current SELinux mode
command: getenforce
become: true
register: sestatus
changed_when: False
- name: Fail if SELinux is not in Enforced mode (RHEL)
fail:
msg: >-
SELinux is running in {{ sestatus.stdout }} mode on the Undercloud.
Ensure that SELinux is enabled and running in Enforcing mode.
when:
- "sestatus.stdout != 'Enforcing'"
- "ansible_distribution == 'RedHat'"
- name: Warn if SELinux is not in Enforced mode (CentOS)
warn:
msg: >-
SELinux is running in {{ sestatus.stdout }} mode on the Undercloud.
Ensure that SELinux is enabled and running in Enforcing mode.
when:
- "sestatus.stdout != 'Enforcing'"
- "ansible_distribution == 'CentOS'"

View File

@ -0,0 +1,8 @@
---
metadata:
name: Undercloud SELinux Enforcing Mode Check
description: >
Check if the Undercloud is running SELinux in Enforcing mode.
groups:
- prep
- pre-introspection

View File

@ -0,0 +1,38 @@
Undercloud-service-status
=========================
An Ansible role to verify the Undercloud services states before running an
Update or Upgrade.
Requirements
------------
This role needs to be run against an installed Undercloud.
Role Variables
--------------
- undercloud_service_list: A list of services actually coming from the tripleo-ansible-inventory
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-service-status }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team.

View File

@ -0,0 +1,9 @@
---
undercloud_service_list:
- openstack-nova-compute
- openstack-heat-engine
- openstack-ironic-conductor
- openstack-swift-container
- openstack-swift-object
- openstack-mistral-engine

View File

@ -0,0 +1,28 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []

View File

@ -0,0 +1,18 @@
---
- name: Check Services are running
command: "/usr/bin/systemctl show {{ item }} --property ActiveState"
become: true
with_items: "{{ undercloud_service_list }}"
register: "check_services"
changed_when: False
ignore_errors: true
- name: Fail if services were not running
fail:
msg: >-
One of the undercloud services was not active.
Please check {{ item.item }} first and then confirm the status of
undercloud services in general before attempting to update or
upgrade the environment.
failed_when: "item.stdout != 'ActiveState=active'"
with_items: "{{ check_services.results }}"

View File

@ -0,0 +1,8 @@
---
metadata:
name: Verify undercloud services state before running update or upgrade
description: >
Check undercloud status before running a stack update - especially minor update and major upgrade.
groups:
- post-upgrade
- pre-upgrade

View File

@ -0,0 +1,37 @@
Undercloud-tokenflush
=====================
An Ansible role to check if `keystone-manage token_flush` is enabled for the keystone user.
Requirements
------------
This role requires an installed and working Undercloud.
Role Variables
--------------
- cron_check: <'keystone-manage token_flush'> -- the string to check in the crontab
Dependencies
------------
No dependencies.
Example Playbook
----------------
- hosts: undercloud
roles:
- { role: undercloud-tokenflush }
License
-------
Apache
Author Information
------------------
Red Hat TripleO Validations Team

View File

@ -0,0 +1,3 @@
---
cron_check: "keystone-manage token_flush"

View File

@ -0,0 +1,29 @@
galaxy_info:
author: TripleO Validations Team
company: Red Hat
license: Apache
min_ansible_version: 2.4
platforms:
- name: CentOS
versions:
- 7
- name: RHEL
versions:
- 7
categories:
- cloud
- baremetal
- system
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies:
- role: validation-lib-utils

View File

@ -0,0 +1,29 @@
---
# Verify that the keystone user's crontab contains a periodic
# "keystone-manage token_flush" job; without it the keystone token table
# can grow without bound.
- name: Get the path of tripleo undercloud config file
  become: true
  hiera: name="tripleo_undercloud_conf_file"

- name: Get the Container CLI from the undercloud.conf file
  become: true
  ini:
    path: "{{ tripleo_undercloud_conf_file }}"
    section: DEFAULT
    key: container_cli
    ignore_missing_file: true
  register: container_cli

- name: Get keystone crontab
  become: true
  # Strip comment lines so only active crontab entries are inspected.
  shell: |
    set -o pipefail
    {{ container_cli.value|default('podman') }} exec keystone_cron crontab -l -u keystone |grep -v '^#'
  register: cron_result
  changed_when: False

- name: Check keystone crontab
  fail:
    msg: >-
      keystone token_flush does not appear to be enabled via cron.
      You should add '<desired interval> keystone-manage token_flush'
      to the keystone users crontab.
  when: "cron_result.stdout.find('keystone-manage token_flush') == -1"

View File

@ -0,0 +1,9 @@
---
metadata:
name: Verify token_flush is enabled in keystone users crontab
description: >
Without a token_flush crontab enabled for the keystone user, the
keystone database can grow very large. This validation checks that
the keystone token_flush crontab has been set up.
groups:
- pre-introspection

View File

@ -29,6 +29,11 @@ scripts =
data_files =
share/openstack-tripleo-validations/ = hosts.sample
share/openstack-tripleo-validations/validations = validations/*
share/openstack-tripleo-validations/roles = roles/*
share/openstack-tripleo-validations/playbooks = playbooks/*
share/openstack-tripleo-validations/callback_plugins = callback_plugins/*
share/openstack-tripleo-validations/lookup_plugins = lookup_plugins/*
share/openstack-tripleo-validations/library = library/*
[build_sphinx]
source-dir = doc/source