Retire Tripleo: remove repo content

TripleO project is retiring
- https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project repo.

Change-Id: I96704be72b4d2f5f776715d10f71799bba153c67
This commit is contained in:
Ghanshyam Mann 2024-02-24 11:34:23 -08:00
parent 0fd72e2100
commit f543b1f66a
171 changed files with 8 additions and 10227 deletions

View File

@ -1,15 +0,0 @@
# ansible-lint configuration for this repository.
# Emit machine-parseable (pep8-style) output for CI consumption.
parseable: true
# Project-local custom rules (e.g. the shell pipefail check).
rulesdir:
- ./ci-scripts/ansible_rules/
quiet: false
# Rules intentionally skipped, with the rationale for each.
skip_list:
- ANSIBLE0006 # Using command rather than module we have a few use cases
# where we need to use curl and rsync
- ANSIBLE0007 # Using command rather than an argument to e.g file
# we have a lot of 'rm' command and we should use file module instead
- ANSIBLE0010 # Package installs should not use latest.
# Sometimes we need to update some packages.
- ANSIBLE0013 # Use Shell only when shell functionality is required
- ANSIBLE0016 # Tasks that run when changed should likely be handlers
# this requires refactoring roles, skipping for now
verbosity: 1

68
.gitignore vendored
View File

@ -1,68 +0,0 @@
# Ignore rules for build artifacts, caches, and tool output.
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
# NOTE(review): '*,cover' (comma) matches ',cover' annotation files emitted by
# coverage tooling — presumed intentional, not a typo of '*.cover'; confirm.
*,cover
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
doc/build/
# PyBuilder
target/
# virtualenv
.venv/
# jenkins config
jenkins/config.ini
playbooks/debug.yml
# Files created by releasenotes build
releasenotes/build
# Editors
.*.sw[klmnop]

View File

@ -1,46 +0,0 @@
---
# Pre-commit hook configuration (https://pre-commit.com).
# NOTE(review): the extracted view lost all indentation, which makes this file
# invalid YAML; nesting reconstructed per the standard pre-commit schema
# (repos -> repo/rev/hooks -> id/options).
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.4.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: mixed-line-ending
      - id: check-byte-order-marker
      - id: check-executables-have-shebangs
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements
      - id: flake8
      - id: check-yaml
        files: .*\.(yaml|yml)$
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.20.0
    hooks:
      - id: yamllint
        files: \.(yaml|yml)$
        types: [file, yaml]
        entry: yamllint --strict -f parsable
  - repo: https://github.com/ansible/ansible-lint.git
    rev: v4.2.0
    hooks:
      - id: ansible-lint
        always_run: true
        pass_filenames: false
        # do not add file filters here as ansible-lint does not give reliable
        # results when called with individual files.
        # https://github.com/ansible/ansible-lint/issues/611
        verbose: true
        entry: env ANSIBLE_LIBRARY=library ansible-lint --force-color -p -v
  - repo: https://github.com/openstack-dev/bashate.git
    rev: 0.6.0
    hooks:
      - id: bashate
        entry: bashate --error . --ignore=E006,E040
        # Run bashate check for all bash scripts
        # Ignores the following rules:
        # E006: Line longer than 79 columns (as many scripts use jinja
        #       templating, this is very difficult)
        # E040: Syntax error determined using `bash -n` (as many scripts
        #       use jinja templating, this will often fail and the syntax
        #       error will be discovered in execution anyway)

View File

@ -1,6 +0,0 @@
---
# yamllint configuration: start from the bundled defaults and relax the
# line-length limit for this repository.
# NOTE(review): nesting reconstructed — the extracted view flattened the
# rules/line-length/max hierarchy, which is invalid for yamllint.
extends: default

rules:
  line-length:
    max: 180

201
LICENSE
View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,342 +1,10 @@
===============
tripleo-upgrade
===============
This project is no longer maintained.
This role aims to provide a unified tool for upgrading TripleO-based deployments.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
Requirements
------------
This role requires:
* An ansible inventory file containing reachable undercloud and overcloud nodes
* Nodes in the inventory file are placed in groups based on their roles (e.g compute nodes are part of the 'compute' group)
* Repositories containing packages to be upgraded are already installed on undercloud and overcloud nodes (or, for overcloud, define an upgrade_init_command variable)
* The initial overcloud deploy command is placed in a script file located in the path set by the overcloud_deploy_script var. Each option/environment file should be placed on a separate new line, e.g::
source ~/stackrc
export THT=/usr/share/openstack-tripleo-heat-templates/
openstack overcloud deploy --templates $THT \
-r ~/openstack_deployment/roles/roles_data.yaml \
-e $THT/environments/network-isolation.yaml \
-e $THT/environments/network-management.yaml \
-e $THT/environments/storage-environment.yaml \
-e ~/openstack_deployment/environments/nodes.yaml \
-e ~/openstack_deployment/environments/network-environment.yaml \
-e ~/openstack_deployment/environments/disk-layout.yaml \
-e ~/openstack_deployment/environments/neutron-settings.yaml \
--log-file overcloud_deployment.log &> overcloud_install.log
Role Variables
--------------
Available variables are listed below.
Only create upgrade scripts without running them::
upgrade_noop: false
Only create update scripts without running them::
update_noop: false
Run undercloud upgrade::
undercloud_upgrade: false
Run containerized undercloud upgrade::
containerized_undercloud_upgrade: false
Run overcloud upgrade::
overcloud_upgrade: false
Run undercloud update::
undercloud_update: false
Run overcloud update::
overcloud_update: false
Validate overcloud after update::
overcloud_images_validate: false
Location of the initial overcloud deploy script::
overcloud_deploy_script: "~/overcloud_deploy.sh"
Location of the undercloud upgrade script which is going to be generated by this role::
undercloud_upgrade_script: "~/undercloud_upgrade.sh"
Location of the upgrade script used in the composable docker upgrade step which is going to be generated by this role::
overcloud_composable_upgrade_script: "~/composable_docker_upgrade.sh"
Location of the upgrade script used in the converge docker upgrade step which is going to be generated by this role::
overcloud_converge_upgrade_script: "~/converge_docker_upgrade.sh"
Location of the undercloud credentials file::
undercloud_rc: "~/stackrc"
Location of the overcloud credentials file::
overcloud_rc: "~/overcloudrc"
Allows the user to apply known issues workarounds during the upgrade process. The list of patches/commands used for workarounds should be passed via --extra-vars and it should include dictionaries for undercloud/overcloud workarounds::
upgrade_workarounds: false
Set to true when the deployment has been done by tripleo quickstart::
use_oooq: false
Set to true to launch an instance before starting upgrade. This can be useful for running tests during upgrade such as live migration or floating IP connectivity checks::
workload_launch: false
Set to true to cleanup previously launched workload when update/upgrade finishes::
workload_cleanup: false
Name of the external network providing floating IPs for instance connectivity. This provides external connectivity and needs to exist beforehand, created by the user::
external_network_name: "public"
URL of the image used for the workload instance::
workload_image_url: "https://download.cirros-cloud.net/0.6.0/cirros-0.6.0-x86_64-disk.img"
Amount of memory assigned for the workload instance::
workload_memory: "512"
Set to true to use an SRIOV PF port when workload is created. Notice this will not work with cirros images::
workload_sriov: false
Set to true when running the role in the TripleO CI jobs. It avoids losing connectivity to the undercloud by skipping reboot and ssh kill tasks::
tripleo_ci: false
Bash commands, defines a custom upgrade init to be taken into account during overcloud upgrade::
upgrade_init_command: |
sudo tripleo-repos -b pike current
Set it to true to get a multi-cell update. It changes the way the oc_roles_hosts is calculated::
update_cell: false
When set to true add a vm with attached fip and monitor ping from the undercloud. If ping loss time is higher than `loss_threshold` seconds or `loss_threshold_percent` in percentage we fail::
l3_agent_connectivity_check: false
For update run tasks we set a 0 seconds loss threshold by default::
update_loss_threshold: 0
Default time is 60 seconds for ping loss::
loss_threshold: 60
Failsafe check for the loss threshold, expressed as a percentage::
loss_threshold_percent: 1
Set to true to enable validations::
updates_validations: true
Enable extra logs during update. Default to true. It collects in /var/log/extras/ the output of the commands from collect_logs.yaml for every stages of the update run::
log_stages: true
Dependencies
------------
None.
Example Playbook
----------------
An example playbook is provided in tests/test.yml::
- hosts: undercloud
gather_facts: true
become: true
become_method: sudo
become_user: stack
roles:
- tripleo-upgrade
Usage with tripleo Quickstart
-----------------------------
After a successful deployment with OOOQ, you can create the necessary
scripts using this example playbook (duplicate from
./tests/oooq-test.yaml)::
---
- hosts: undercloud
gather_facts: true
become: true
become_method: sudo
become_user: stack
roles:
- { role: tripleo-upgrade, use_oooq: 'true'}
And then you run it like this (adjust the paths to your oooq specific
one)::
ANSIBLE_SSH_ARGS="-F $(pwd)/ssh.config.ansible" \
ANSIBLE_CONFIG=$PWD/ansible.cfg \
ansible-playbook -i hosts -vvv tripleo-upgrade/tests/oooq-test.yaml
This will only create the file (without running the actual upgrade):
- undercloud_upgrade.sh
- composable_docker_upgrade.sh
- overcloud-compute-\*_upgrade_pre.sh
- overcloud-compute-\*_upgrade.sh
- overcloud-compute-\*_upgrade_post.sh
- converge_docker_upgrade.sh
with the correct parameters.
Usage with InfraRed
-------------------
tripleo-upgrade comes preinstalled as an InfraRed plugin.
In order to install it manually, the following InfraRed command should be used::
infrared plugin add tripleo-upgrade
# add with a specific revision / branch
infrared plugin add --revision stable/rocky tripleo-upgrade
After a successful InfraRed overcloud deployment you need to run the following steps to upgrade the deployment:
Symlink roles path::
ln -s $(pwd)/plugins $(pwd)/plugins/tripleo-upgrade/infrared_plugin/roles
Set up undercloud upgrade repositories::
infrared tripleo-undercloud \
--upgrade yes \
--mirror ${mirror_location} \
--ansible-args="tags=upgrade_repos"
Set up undercloud update repositories::
infrared tripleo-undercloud \
--update-undercloud yes \
--mirror ${mirror_location} \
--build latest \
--version 12 \
--ansible-args="tags=upgrade_repos"
Upgrade undercloud::
infrared tripleo-upgrade \
--undercloud-upgrade yes
Update undercloud::
infrared tripleo-upgrade \
--undercloud-update yes
Set up overcloud upgrade repositories::
infrared tripleo-overcloud \
--deployment-files virt \
--upgrade yes \
--mirror ${mirror_location} \
--ansible-args="tags=upgrade_collect_info,upgrade_repos"
Set up overcloud update repositories/containers::
infrared tripleo-overcloud \
--deployment-files virt \
--ocupdate True \
--build latest \
--ansible-args="tags=update_collect_info,update_undercloud_validation,update_repos,update_prepare_containers"
Upgrade overcloud::
infrared tripleo-upgrade \
--overcloud-upgrade yes
Update overcloud::
infrared tripleo-upgrade \
--overcloud-update yes
Advanced upgrade options
------------------------
Operator can now specify order of roles to upgrade by using *roles_upgrade_order* variable.
It's the **responsibility** of operator to specify *Controller* role first followed by all other roles.
*roles_upgrade_order* variable expects roles being separated by *;(semicolon)*, for e.g.:
::
infrared tripleo-upgrade \
--overcloud-upgrade yes \
-e 'roles_upgrade_order=ControllerOpenstack;Database;Messaging'
will upgrade ControllerOpenstack group, then Database and finally Messaging.
Multiple roles could be upgraded in parallel, to achieve this they should be separated by *,(comma)*, for e.g:
::
infrared tripleo-upgrade \
--overcloud-upgrade yes \
-e 'roles_upgrade_order=ControllerOpenstack,Database;Messaging'
will upgrade Controller and Database groups in parallel and then continue with Messaging.
Running the role manually from the undercloud
---------------------------------------------
This role can be run manually from the undercloud by doing the following steps:
Note: before starting the upgrade process make sure that both the undercloud
and overcloud nodes have the repositories with upgraded packages set up
Clone this repository
git clone https://opendev.org/openstack/tripleo-upgrade
Set ansible roles path::
ANSIBLE_ROLES_PATH=$(pwd)
Create inventory file::
printf "[undercloud]\nlocalhost ansible_connection=local" > hosts
Run the playbook including this role::
ansible-playbook -i hosts tripleo-upgrade/tests/test.yml
=======
License
=======
Apache License 2.0
==================
Author Information
==================
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

View File

@ -1,6 +0,0 @@
# These are required here because ansible can't be in global-requirements due
# to licensing conflicts. But we still need to be able to pull them in for
# lint checks and want to document these as ansible specific things that may
# be required for this repository.
ansible-core
ansible-lint

View File

@ -1,14 +0,0 @@
[defaults]
# Reuse cached facts where possible to speed up repeated runs.
gathering = smart
# Silence warnings about using command/shell instead of dedicated modules.
command_warnings = False
# Do not write .retry files next to playbooks.
retry_files_enabled = False
# Print per-task timing at the end of each run.
callback_whitelist = profile_tasks
# Attempt to load custom modules whether it's installed system-wide or from a virtual environment
test_plugins = test_plugins:$VIRTUAL_ENV/usr/local/share/tripleo-upgrade/playbooks/test_plugins:playbooks/test_plugins
library = library:$VIRTUAL_ENV/usr/local/share/tripleo-upgrade/playbooks/library:playbooks/library
roles_path = roles:$VIRTUAL_ENV/usr/local/share/ansible/roles/tripleo-upgrade:$VIRTUAL_ENV/usr/local/share/
[ssh_connection]
# Hash the ControlPath (%C) to avoid "path too long for Unix domain socket".
control_path = %(directory)s/%C

View File

@ -1,53 +0,0 @@
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansiblelint import AnsibleLintRule
def incorrect_task(task, cmd):
    """Return True when ``task`` is a risky shell pipeline without pipefail.

    A task is flagged when it uses the ``shell`` module, its command shares at
    least one token with ``cmd`` (e.g. ``|``), and ``pipefail`` does not appear
    among the command's whitespace-split tokens. Tasks that register their
    result or set ``ignore_errors`` are exempt.

    :param task: ansible task dict (mapping of module/keyword names to values)
    :param cmd: iterable of trigger tokens (e.g. ``["|", "timestamper_cmd"]``)
    :returns: bool — True when the task should be reported
    """
    if 'shell' not in task:
        return False
    if 'register' in task:
        return False
    if task.get('ignore_errors'):
        return False
    # The shell action may be a plain string or a dict with a 'cmd' key.
    if isinstance(task['shell'], dict):
        args = task['shell']['cmd'].split()
    else:
        args = task['shell'].split()
    # Flag only when a trigger token is present and pipefail is not.
    return not set(args).isdisjoint(cmd) and 'pipefail' not in args
class ShellPipefail(AnsibleLintRule):
    """Ansible-lint rule: shell tasks that pipe must set ``-o pipefail``."""

    id = 'OOOQ0001'
    shortdesc = 'Shell should have a pipefail'
    description = 'Shell commands should have "set -o pipefail" if using PIPE'
    tags = ['shell']
    # Trigger tokens checked against the shell command's split words.
    cmd = ["|", "timestamper_cmd"]

    def matchplay(self, file, play):
        """Return a list of ``(file, shortdesc)`` matches for ``play``.

        When the play is a ``block`` (and does not set ``ignore_errors``),
        each task inside the block is checked; otherwise the play itself is
        treated as a single task.
        """
        ret = []
        if play.get('block') and not play.get('ignore_errors'):
            for task in play['block']:
                if incorrect_task(task, self.cmd):
                    ret.append((file, self.shortdesc))
        else:
            if incorrect_task(play, self.cmd):
                ret.append((file, self.shortdesc))
        return ret

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
# Build the release notes with Sphinx and remind about uncommitted notes.
# Start from a clean build directory so stale pages are not carried over.
rm -rf releasenotes/build
# -a: write all files; -E: ignore the cached environment; -W: warnings are errors.
sphinx-build -a -E -W \
-d releasenotes/build/doctrees \
-b html \
releasenotes/source releasenotes/build/html
# Capture the build status now; the reminder below must not mask a failure.
BUILD_RESULT=$?
# Modified/added/untracked files under releasenotes/notes, per git porcelain output.
UNCOMMITTED_NOTES=$(git status --porcelain | \
awk '$1 ~ "M|A|??" && $2 ~ /releasenotes\/notes/ {print $2}')
if [ "${UNCOMMITTED_NOTES}" ]; then
cat <<EOF
REMINDER: The following changes to release notes have not been committed:
${UNCOMMITTED_NOTES}
While that may be intentional, keep in mind that release notes are built from
committed changes, not the working directory.
EOF
fi
# Propagate the sphinx-build exit status.
exit ${BUILD_RESULT}

View File

@ -1,331 +0,0 @@
---
# defaults file for tripleo-upgrade
# NOTE(review): the extracted view lost nested indentation; the block scalar
# under timestamper_cmd and the mapping under ffu_undercloud_repo_args have
# been re-indented, and list items use a consistent 2-space indent.
# main vars:
working_dir: "{{ (ansible_env|default({})).HOME|default('/home/stack') }}"
# undercloud.conf
undercloud_conf: "{{ working_dir }}/undercloud.conf"
# overcloud stack name
overcloud_stack_name: "overcloud"
# TODO: those variable can be changed for the explicit keyword in tag
# when https://github.com/ansible/ansible/issues/11045 is merged.
# enable update/upgrade
upgrade_noop: false
update_noop: false
undercloud_upgrade: false
undercloud_os_upgrade: false
containerized_undercloud_upgrade: true
overcloud_upgrade: false
undercloud_update: false
overcloud_update: false
overcloud_images_validate: false
overcloud_batch_update: false
# enable ffu upgrade
ffu_undercloud_upgrade: false
ffu_undercloud_os_upgrade: false
ffu_overcloud_upgrade: false
ffu_noop: false
ffu_bulk: true
ffu_computes_rolling: false
# enable all at once FFWD upgrade.
# This procedure will pause ceph and upgrade the whole overcloud
# at once.
fast_and_furious: false
# enable cells update
update_cell: false
# the role isn't run against a valid OpenStack
# environment. This option (as well as the noop ones)
# are used when testing role's code.
mock_environment: "{{ upgrade_noop or update_noop or ffu_noop }}"
# enable upgrade workarounds
upgrade_workarounds: false
# enable update workarounds
updates_workarounds: false
# enable group validations execution
run_validations: false
# comma separated list of skipped validations
skiplist_validations: ''
# Validation Ansible inventory for Upgrade
upgrade_validation_inventory: "{{ working_dir }}/overcloud-deploy/{{ overcloud_stack_name }}/config-download/{{ overcloud_stack_name }}/tripleo-ansible-inventory.yaml"
# extra arguments to pass in the validations group
# execution.
# Example: validations_extra_args: "--extra-vars min_undercloud_ram_gb=5"
validations_extra_args: ''
# validations groups for updates to be
# executed if updates_validations is true.
updates_validations_groups:
  - pre-update
  - pre-update-prepare
  - pre-update-run
  - post-update
# validations groups for upgrades to be
# executed if upgrades validations are enabled.
upgrades_validations_groups:
  - pre-upgrade
  - pre-system-upgrade
  - pre-undercloud-upgrade
  - pre-overcloud-prepare
  - pre-overcloud-upgrade
  - pre-overcloud-converge
  - post-upgrade
# enable ffu workarounds
ffu_upgrade_workarounds: false
# use oooq
use_oooq: false
# Running in tripleo ci
tripleo_ci: false
# rc files:
undercloud_rc: "{{ working_dir }}/stackrc"
overcloud_rc: "{{ working_dir }}/{{ overcloud_stack_name }}rc"
# required for logstash indexing
timestamper_cmd: >-
  | awk '{ print strftime("%Y-%m-%d %H:%M:%S |"), $0; fflush(); }'
launch_sanity_workload: true
# launch workload before update/upgrade
workload_launch: false
workload_cleanup: false
external_network_name: "public"
workload_image_url: "https://download.cirros-cloud.net/0.6.0/cirros-0.6.0-x86_64-disk.img"
workload_memory: "512"
workload_user: "cirros"
workload_disk: 5
workload_vcpu: 1
workload_swap: 512
workload_sriov: false
# upgrade scripts name:
undercloud_upgrade_script: "{{ working_dir }}/undercloud_upgrade.sh"
undercloud_update_script: "{{ working_dir }}/undercloud_update.sh"
overcloud_deploy_script: "{{ working_dir }}/overcloud_deploy.sh"
overcloud_upgrade_prepare_script: "{{ working_dir }}/overcloud_upgrade_prepare.sh"
overcloud_system_upgrade_script_base: "{{ working_dir }}/overcloud_system_upgrade"
overcloud_upgrade_run_script_base: "{{ working_dir }}/overcloud_upgrade_run"
overcloud_upgrade_converge_script: "{{ working_dir }}/overcloud_upgrade_converge.sh"
workload_launch_script: "{{ working_dir }}/workload_launch.sh"
# overcloud update scripts
overcloud_update_prepare_script: "{{ working_dir }}/overcloud_update_prepare.sh"
overcloud_update_run_script_base: "{{ working_dir }}/overcloud_update_run"
overcloud_validate_images_script: "{{ working_dir }}/validate_docker_images_versions.sh"
overcloud_validate_ap_services: openstack-cinder-volume openstack-cinder-backup openstack-manila-share
# container registry file for overcloud
container_registry_file: "docker-images.yaml"
# container prepare params env file
uc_containers_prepare_file: "containers-prepare-parameter.yaml"
# url of the remote docker registry to be used
docker_registry_url: 'registry.example.local'
# use upstream or downstream container images
upstream_container_images: true
# tripleo-heat-templates directory
tht_directory: "/usr/share/openstack-tripleo-heat-templates"
# default roles_data.yaml
default_roles_data: "{{ tht_directory }}/roles_data.yaml"
# default network_data.yaml
default_network_data: "{{ tht_directory }}/network_data.yaml"
# local registry environment file location
containers_default_parameters: "{{ working_dir }}/docker-osp12.yaml"
# container_images.yaml file location
container_images_location: "{{ working_dir }}/container_images.yaml"
# comma separated list of container names that are expected to be not updated before container test
# Exclude nova_virtlogd by default, because container image test happens before overcloud reboot
# https://opendev.org/openstack/tripleo-heat-templates/src/commit/64a52f31507f464a0437aac0a53f65250845324b/releasenotes/notes/nova_virtlogd_wrapper-120fcfcfa0787b2b.yaml
excluded_containers_from_check: "nova_virtlogd"
# undercloud_hiera.yaml file location
undercloud_hiera: "{{ working_dir }}/undercloud_hiera.yaml"
# reboot nodes post upgrade
undercloud_reboot: false
controller_reboot: false
force_reboot: false
# time to wait for nodes to reboot in seconds
node_reboot_timeout: 300
# enable l3 agent connectivity check during upgrade
l3_agent_connectivity_check: false
l3_agent_connectivity_check_start_script: "{{ working_dir }}/l3_agent_start_ping.sh"
l3_agent_connectivity_check_wait_script: "{{ working_dir }}/l3_agent_wait_ping.sh"
l3_agent_connectivity_check_stop_script: "{{ working_dir }}/l3_agent_stop_ping.sh"
l3_agent_failover_check: false
# logs
log_playbook: "{{ working_dir }}/collect_log.yaml"
log_playbook_script: "{{ working_dir }}/collect_log"
log_stages: true
# enable web load test
fip_http_check: false
# migrate instances between compute nodes during upgrade
compute_evacuate: false
compute_cold_evacuate: false
compute_migration_timeout: 120
# workload exist prior to upgrade
workload_external: false
# enable post upgrade checks
controller_upgrade_post: false
# Provide a custom ssh-config file
need_ssh_config: true
# Extra environment files to pass into upgrade prepare command appending them
# in a list.
# Format: ['env_file1', 'env_file2', 'env_file3']
# It will be rendered joining each file with ' -e ' when running the command
# 'openstack overcloud upgrade prepare', so it will be possible to add
# extra parameters in a flexible way
upgrade_prepare_extra_params: []
# No upgrade_init_command by default
upgrade_init_command_dest: "{{working_dir}}/UpgradeInitCommand.yaml"
upgrade_init_command: false
# ffu undercloud upgrade
ffu_undercloud_releases: ['rocky', 'stein', 'train']
ffu_undercloud_repo_type: tripleo-repos
ffu_undercloud_repo_args:
  tripleo_repos:
    rocky: "-b rocky current"
    stein: "-b stein current"
    train: "-b train current"
# Tripleo OOOQ variables.
composable_scenario: ""
upgrade_composable_scenario: ""
## ffu overcloud upgrade
ffu_overcloud_releases: ['rocky', 'stein', 'train']
## ffu overcloud to use custom scripts instead of tripleo-repos
ffu_overcloud_repo_type: tripleo-repos
## overcloud ssh user
overcloud_ssh_user: ''
# when set to True it allows running most of the
# openstack overcloud commands with the --debug flag
tripleo_upgrade_debug: false
# when set to True it will append the flag --validation-errors-nonfatal
# into the "overcloud <action> prepare" and "overcloud <action> converge"
# commands. This means that the execution of the command will continue
# independently in spite of validation errors.
tripleo_upgrade_validations_non_fatal: false
# When set to true, the converge operations (upgrade, ffwd)
# will run with --update-plan-only. This does not provide full
# verification of the process, but might be necessary to fit within a
# reasonable job run time for gating.
tripleo_upgrade_converge_plan_only: false
## If set to true, overcloud upgrade will be run for each of the
# playbooks defined in 'overcloud_upgrade_playbooks', otherwise
# 'all' will be passed into --playbooks option.
overcloud_upgrade_multibooks: false
## If set to true, overcloud update will be run for each of the
# playbooks defined in 'overcloud_update_playbooks', otherwise
# 'all' will be passed into --playbooks option.
overcloud_update_multibooks: false
# Ansible playbooks executed during "overcloud upgrade run"
overcloud_upgrade_playbooks: ['upgrade_steps_playbook.yaml', 'deploy_steps_playbook.yaml', 'post_upgrade_steps_playbook.yaml']
# Ansible playbooks executed during "overcloud update run"
overcloud_update_playbooks: ['update_steps_playbook.yaml', 'deploy_steps_playbook.yaml']
# Post upgrade/update nova actions tests
nova_actions_check: false
# Enabled services yaml structure
enabled_services: {}
# Ceph OSD service present in overcloud
ceph_osd_enabled: false
# time to wait for cephadm to complete update in minutes
ceph_update_timeout: 20
# Remove unused RPMs
upgrade_remove_rpm: false
# List of roles deployed in overcloud
oc_roles: []
# Dictionary with roles and hosts per role
# oc_roles_hosts: {
# 'Controller' : ["controller-0", "controller-1", "controller-2"],
# 'Compute' : ["compute-0"]
# }
oc_roles_hosts: {}
# Roles upgrade order
roles_upgrade_order: '__undefined__'
# Packet loss threshold for a ping test in seconds.
loss_threshold: 60
# and in %
loss_threshold_percent: 1
# default for update run tasks
update_loss_threshold: 0
# Leapp OS upgrade variables
#
# Run leapp upgrade without RHSM subscription
# Requires workarounds being applied.
leapp_unsubscribed: false
leapp_unregister: true
leapp_skip_release_check: false
# Avoid Rhel Enforcement policies.
enforce_rhel: false
# Compact system upgrade prepare and
# system upgrade run into a single step
system_upgrade_compact: true
# rhsm overcloud environment location with subscription details
rhsm_overcloud_env: ''
# ephemeral heat enabled
ephemeral_heat: true
# extra args for update prepare and update converge
extra_args: ''

View File

@ -1,3 +0,0 @@
sphinx>=2.0.0,!=2.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
reno>=3.1.0 # Apache-2.0

View File

@ -1,3 +0,0 @@
Host *
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null

View File

@ -1,104 +0,0 @@
#!/usr/bin/env python
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import yaml
def to_inventory_hostmap(data):
    """Flatten a tripleo-ansible inventory into a group->hosts mapping.

    :param data: the inventory, either as a YAML string or as an
        already-parsed dict of group name -> group definition.
    :returns: dict mapping every group name to the list of host names it
        contains, including hosts aggregated from child groups. Host lists
        built from child groups are sorted; direct 'hosts' lists keep the
        inventory's own ordering (preserved from the original behavior).
    """
    if isinstance(data, str):
        inventory = yaml.safe_load(data)
    else:
        inventory = data
    group_host_map = {}
    todo = collections.deque(inventory.keys())
    while todo:
        group = todo.popleft()
        if 'hosts' in inventory[group]:
            group_host_map[group] = list(inventory[group]['hosts'])
        elif 'children' in inventory[group]:
            for child in inventory[group]['children']:
                # Children have not all been flattened yet,
                # so postpone flattening this group.
                if child in todo:
                    todo.append(group)
                    break
            else:
                group_host_map[group] = []
                for child in inventory[group]['children']:
                    group_host_map[group] += group_host_map[child]
                group_host_map[group].sort()
        else:
            # Bug fix: a group with neither 'hosts' nor 'children' used to
            # be dropped from the result entirely, and any parent group
            # aggregating it then raised KeyError. Map it to an empty host
            # list instead (mirrors to_inventory_rolemap's behavior).
            group_host_map[group] = []
    return group_host_map
def to_inventory_rolemap(data):
    """Flatten a tripleo-ansible inventory into a group->roles mapping.

    :param data: the inventory, either as a YAML string or as an
        already-parsed dict of group name -> group definition.
    :returns: dict mapping every group name to a sorted list of the
        tripleo role names reachable from that group.
    """
    if isinstance(data, str):
        inventory = yaml.safe_load(data)
    else:
        inventory = data
    missing = object()
    role_map = {}
    pending = collections.deque(inventory)
    while pending:
        name = pending.popleft()
        entry = inventory[name]
        role = entry.get('vars', {}).get('tripleo_role_name', missing)
        if role is not missing:
            # A group tagged with a role maps directly onto that role.
            role_map[name] = [role]
            continue
        role_map[name] = []
        children = entry.get('children', {})
        if any(child in pending for child in children):
            # Some children are not flattened yet; retry this group later.
            pending.append(name)
            continue
        for child in children:
            role_map[name] += role_map[child]
        role_map[name].sort()
    return role_map
def to_inventory_roles(data):
    """Return the sorted list of distinct tripleo roles in the inventory.

    :param data: the inventory, either as a YAML string or as an
        already-parsed dict of group name -> group definition.
    :returns: sorted list of unique ``tripleo_role_name`` values found in
        the groups' vars (groups without one are ignored).
    """
    if isinstance(data, str):
        inventory = yaml.safe_load(data)
    else:
        inventory = data
    # Collect every role name once; None marks groups without a role.
    found = {
        group_data.get('vars', {}).get('tripleo_role_name')
        for group_data in inventory.values()
    }
    found.discard(None)
    return sorted(found)
class FilterModule(object):
    """Ansible filter plugin entry point exposing the inventory helpers."""

    def filters(self):
        """Return the mapping of filter names to callables for Ansible."""
        return {
            'to_inventory_hostmap': to_inventory_hostmap,
            'to_inventory_rolemap': to_inventory_rolemap,
            'to_inventory_roles': to_inventory_roles,
        }

View File

@ -1,293 +0,0 @@
---
# This file and plugin.spec are required by the Infrared project
- hosts: undercloud
gather_facts: true
become: true
become_method: sudo
become_user: stack
vars:
# Explicit ansible_python_interpreter to allow connecting
# to different OS releases (EL7/8) while using delegate_to.
ansible_python_interpreter: /usr/libexec/platform-python
pre_tasks:
- block:
- name: Set upgrade workload launch
set_fact:
workload_launch: true
when: install.upgrade.workload
- name: Set upgrade workload cleanup
set_fact:
workload_cleanup: true
when: install.upgrade.workloadcleanup
- name: Set upgrade workload image
set_fact:
workload_image_url: "{{ install.upgrade.workloadimage }}"
when: install.upgrade.workloadimage
- name: Set upgrade workload memory
set_fact:
workload_memory: "{{ install.upgrade.workloadmemory }}"
- name: Set upgrade workload user
set_fact:
workload_user: "{{ install.upgrade.workloaduser }}"
- name: Set upgrade workload disk size
set_fact:
workload_disk: "{{ install.upgrade.workloaddisk }}"
- name: Set upgrade workload vcpus
set_fact:
workload_vcpu: "{{ install.upgrade.workloadvcpu }}"
- name: Set upgrade workload swap
set_fact:
workload_swap: "{{ install.upgrade.workloadswap }}"
- name: Set upgrade workload sriov
set_fact:
workload_sriov: true
when: install.upgrade.workloadsriov
- name: Set external network name
set_fact:
external_network_name: "{{ install.public.net.name }}"
- name: Set undercloud upgrade
set_fact:
undercloud_upgrade: true
when: install.undercloud.upgrade
- name: Set undercloud operating system upgrade
set_fact:
undercloud_os_upgrade: true
when: install.undercloud.os.upgrade
- name: Set undercloud reboot
set_fact:
undercloud_reboot: true
when: install.undercloud.reboot
- name: Set overcloud upgrade
set_fact:
overcloud_upgrade: true
when: install.overcloud.upgrade
- name: Set upgrade workarounds
set_fact:
upgrade_workarounds: true
when: install.upgrade.workarounds
- name: Set upstream container images
set_fact:
upstream_container_images: false
when: not install.upstream.container.images
- name: Set use docker local registry
set_fact:
use_local_docker_registry: false
when: not install.upgrade.docker.local.registry
- name: Set docker registry url
set_fact:
docker_registry_url: "{{ install.upgrade.docker.registry.url }}"
- name: Set undercloud update
set_fact:
undercloud_update: true
when: install.undercloud.get('update', {})
- name: Set overcloud update
set_fact:
overcloud_update: true
when: install.overcloud.get('update', {})
- name: Set updates workarounds
set_fact:
updates_workarounds: true
when: install.updates.workarounds
- name: Set upgrade floating ip check
set_fact:
l3_agent_connectivity_check: true
when: install.upgrade.floatingip.check
- name: Search for compute group
set_fact:
compute_group: "{{item.key}}"
loop: "{{ groups|dict2items}}"
when: '"compute" in item.key'
- name: Set upgrade compute host evacuate
set_fact:
compute_evacuate: true
when:
- install.upgrade.compute.evacuate
- groups[compute_group] | length > 1
- name: Set upgrade compute host cold evacuate
set_fact:
compute_cold_evacuate: true
when:
- install.upgrade.compute.cold.evacuate
- groups[compute_group] | length > 1
- name: Set workload already exist before upgrade
set_fact:
workload_external: true
when:
- install.upgrade.workloadexternal
- groups[compute_group] | length > 1
- name: Set upgrade compute host migrate timeout
set_fact:
# install.upgrade.compute.migration.timeout default to 120 by infrared-plugin
compute_migration_timeout: "{{ install.upgrade.compute.migration.timeout }}"
when:
- groups[compute_group] | length > 1
- name: Set deployment-files base
set_fact:
container_registry_file: "{{ install.deployment.files | basename }}/docker-images.yaml"
when: install.deployment.files
- name: Set upgrade controller reboot
set_fact:
controller_reboot: true
when: install.upgrade.controller.reboot
- name: Set upgrade controller post
set_fact:
controller_upgrade_post: true
when: install.upgrade.controller.post
- name: Set upgrade force reboot
set_fact:
force_reboot: true
when: install.upgrade.reboot.force
- name: Set upgrade remove packages
set_fact:
upgrade_remove_rpm: true
when: install.upgrade.remove.rpm
- name: Set upgrade HCI
set_fact:
upgrade_hci: true
when: install.upgrade.hci
- name: Set overcloud credentials file
set_fact:
overcloud_rc: "{{ working_dir }}/{{ install.overcloud.stack }}rc"
- name: Set overcloud stack name
set_fact:
overcloud_stack_name: "{{ install.overcloud.stack }}"
- name: Set undercloud Operating System FFU upgrade
set_fact:
ffu_undercloud_os_upgrade: true
when: install.undercloud.ffu.os.upgrade
- name: Set undercloud FFU upgrade
set_fact:
ffu_undercloud_upgrade: true
when: install.undercloud.ffu.upgrade
- name: Set undercloud FFU upgrade releases
set_fact:
ffu_undercloud_releases: "{{ install.undercloud.ffu.releases }}"
- name: Set undercloud FFU upgrade repo
set_fact:
ffu_undercloud_repo_type: "{{ install.undercloud.ffu.repo }}"
- name: Set FFU upgrade workarounds
set_fact:
ffu_upgrade_workarounds: true
when: install.upgrade.ffu.workarounds
- name: Set overcloud FFU upgrade
set_fact:
ffu_overcloud_upgrade: true
when: install.overcloud.ffu.upgrade
- name: Set overcloud Fast and Furious Upgrade
set_fact:
fast_and_furious: true
when: install.fast.and.furious
- name: Set overcloud FFU upgrade releases
set_fact:
ffu_overcloud_releases: "{{ install.overcloud.ffu.releases }}"
- name: Set overcloud FFU upgrade repo
set_fact:
ffu_overcloud_repo_type: "{{ install.overcloud.ffu.repo }}"
- name: Set overcloud FFU bulk
set_fact:
ffu_bulk: false
when: not install.overcloud.ffu.bulk
- name: Set overcloud FFU compute rolling
set_fact:
ffu_computes_rolling: true
when: install.overcloud.ffu.compute.rolling
- name: Set overcloud ssh user name
set_fact:
overcloud_ssh_user: "{{ install.overcloud.ssh.user }}"
- name: Set upgrade l3 agent failover check
set_fact:
l3_agent_failover_check: true
when: install.upgrade.l3agent.failover.check
- name: Set upgrade nova_actions_check
set_fact:
nova_actions_check: true
when: install.upgrade.nova.actions.check
- name: Set rhsm_overcloud_env
set_fact:
rhsm_overcloud_env: "{{ install.rhsm.overcloud.env }}"
- name: Set enforce_rhel
set_fact:
enforce_rhel: true
when: install.enforce.rhel
- name: Set parameter_defaults for config_heat_extra.yaml
set_fact:
config_heat_extra_yaml:
parameter_defaults: "{{ install.get('config', {}).get('heat', {}) }}"
when: install.get('config', {}).get('heat', {})
- name: Set resource_registry for config_heat_extra.yaml
vars:
config_resource: "{{ install.get('config', {}).get('resource', {}) }}"
set_fact:
config_heat_extra_yaml: "{{ config_heat_extra_yaml | default({}) | combine({'resource_registry': config_resource}) }}"
when: install.get('config', {}).get('resource', {})
- name: Enable the execution of validations
set_fact:
run_validations: true
when: install.run.validations
- name: Set the validations to be skipped
set_fact:
skiplist_validations: "{{ install.skiplist.validations }}"
- name: Extra arguments to be passed in the validations run
set_fact:
validations_extra_args: "{{ install.validations.extra.args }}"
tags:
- always
roles:
- tripleo-upgrade

View File

@ -1,355 +0,0 @@
---
plugin_type: install
subparsers:
tripleo-upgrade:
description: Upgrade or update TripleO deployment
include_groups: ["Ansible options", "Inventory", "Common options", "Answers file"]
groups:
- title: TripleO Upgrade
options:
overcloud-upgrade:
type: Bool
help: |
Upgrade Overcloud.
NOTE: Upgrade require overcloud deployment script to be available in home directory of undercloud
user at undercloud node
default: false
undercloud-os-upgrade:
type: Bool
help: |
Upgrade Undercloud's Operating System
default: false
undercloud-upgrade:
type: Bool
help: |
Upgrade Undercloud
default: false
upgrade-workarounds:
type: Bool
help: |
Apply upgrade workarounds
default: false
upstream-container-images:
type: Bool
help: |
Use upstream or downstream container images during upgrade
default: false
undercloud-reboot:
type: Bool
help: |
Reboot undercloud post upgrade when ovs or kernel get upgraded
default: false
upgrade-floatingip-check:
type: Bool
help: |
Check floating ip connectivity during upgrade.
Note: This requires a running instance with attached floating ip and allowed icmp traffic.
When upgrade-workloadsriov flag is set, external IP
is used instead of FIP
default: false
upgrade-workload:
type: Bool
help: |
Launch workload before starting upgrade
default: false
upgrade-workloadcleanup:
type: Bool
help: |
Cleanup previously launched workload when update/upgrade ends
default: false
upgrade-workloadimage:
type: Value
help: |
Image URL to be used for spawning instance before upgrade.
default: https://download.cirros-cloud.net/0.6.0/cirros-0.6.0-x86_64-disk.img
upgrade-workloadmemory:
type: Value
help: |
Memory assigned to the instance spawned before upgrade
default: 512
upgrade-workloaduser:
type: Value
help: |
User used for connecting to workload instance via SSH
default: cirros
upgrade-workloaddisk:
type: Value
help: |
Disk size assigned to the instance spawned before upgrade
default: 5
upgrade-workloadvcpu:
type: Value
help: |
Amount of vcpus assigned to the instance spawned before upgrade
default: 1
upgrade-workloadswap:
type: Value
help: |
Swap size assigned to the instance spawned before upgrade
default: 512
upgrade-workloadsriov:
type: Bool
help: |
Workload is created with an SRIOV PF port
This option is not supported with cirros images
Correct values need to be set for upgrade-workloaduser,
upgrade-workloaddisk, upgrade-workloadvcpu,
upgrade-workloadmemory, upgrade-workloadimage
default: false
upgrade-compute-evacuate:
type: Bool
help: |
Live migrate instances between compute nodes during upgrade.
default: true
upgrade-compute-cold-evacuate:
type: Bool
help: |
Cold migrate instances between compute nodes during upgrade.
default: false
upgrade-compute-migration-timeout:
type: Value
help: |
Base timeout in seconds to wait for migration to finalize
during upgrade. Timeout scales value by multiplying it by the
number of instances that need to be migrated.
default: 120
upgrade-workloadexternal:
type: Bool
help: |
Workload exist prior to upgrade.
default: false
upgrade-controller-reboot:
type: Bool
help: |
Reboot controller nodes post upgrade
default: true
upgrade-controller-post:
type: Bool
help: |
Run controller post upgrade checks
default: true
upgrade-reboot-force:
type: Bool
help: |
Hard reboot nodes during upgrade
default: false
upgrade-docker-local-registry:
type: Bool
help: Use local docker registry on the undercloud
default: false
upgrade-docker-registry-url:
type: Value
help: The alternative docker registry to use for deployment.
default: 'registry.example.local'
upgrade-remove-rpm:
type: Bool
help: Remove packages which get migrated to containers during upgrade
default: false
upgrade-hci:
type: Bool
help: |
The upgrade workflow for HCI deployments is slightly different.
This option accommodates HCI upgrade.
default: false
upgrade-postcomposable-workload:
type: Bool
help: |
Launch workload after major composable upgrade step
default: false
upgrade-l3agent-failover-check:
type: Bool
help: |
Check l3 agent does not failover during upgrade.
Existing neutron router is required.
default: false
upgrade-nova-actions-check:
type: Bool
help: |
Check Nova actions can be performed to an already existing
instance post upgrade.
default: false
public-net-name:
type: Value
help: |
Specifies the name of the public network.
NOTE: If not provided it will use the default one for the OSP version
default: public
- title: TripleO Update
options:
overcloud-update:
type: Bool
help: |
Update Overcloud.
default: false
undercloud-update:
type: Bool
help: |
Update Undercloud
default: false
updates-workarounds:
type: Bool
help: |
Apply updates workarounds
default: false
deployment-files:
type: Value
help: |
Directory containing the templates of the overcloud deployment.
default: virt
enforce-rhel:
type: Bool
help: |
Skip Rhel Enforcement, false by default, use only when registered.
default: false
run-validations:
type: Bool
help: |
Turn validation execution on or off (default)
default: false
skiplist-validations:
type: Value
help: |
Comma separated string of validations names to be skipped.
default: ''
validations-extra-args:
type: Value
help: |
String containing some extra arguments to be passed in
the validations group execution.
Example: validations-extra-args: "--extra-vars min_undercloud_ram_gb=5"
default: ''
- title: TripleO Options
options:
overcloud-stack:
type: Value
help: Overcloud stack name
default: "overcloud"
overcloud-ssh-user:
type: Value
help: Overcloud ssh user name
default: ''
config-heat:
type: NestedDict
action: append
help: |
Inject additional Tripleo Heat Templates configuration options under "parameter_defaults"
entry point.
Example:
--config-heat ComputeExtraConfig.nova::allow_resize_to_same_host=true
--config-heat NeutronOVSFirewallDriver=openvswitch
should inject the following yaml to "overcloud deploy" command:
---
parameter_defaults:
ComputeExtraConfig:
nova::allow_resize_to_same_host: true
NeutronOVSFirewallDriver: openvswitch
It is also possible to have . (dot) included in key by escaping it.
Example:
--config-heat "ControllerExtraConfig.opendaylight::log_levels.org\.opendaylight\.netvirt\.elan=TRACE"
should inject the following yaml to "overcloud deploy" command:
---
parameter_defaults:
ControllerExtraConfig:
opendaylight::log_levels:
org.opendaylight.netvirt.elan: TRACE
config-resource:
type: NestedDict
action: append
help: |
Inject additional Tripleo Heat Templates configuration options under "resource_registry"
entry point.
Example:
--config-resource OS::TripleO::BlockStorage::Net::SoftwareConfig=/home/stack/nic-configs/cinder-storage.yaml
should inject the following yaml to "overcloud deploy" command:
---
resource_registry:
OS::TripleO::BlockStorage::Net::SoftwareConfig: /home/stack/nic-configs/cinder-storage.yaml
rhsm-overcloud-env:
type: Value
help: Rhsm environment location to be passed during upgrade/update prepare step
default: ''
- title: TripleO Fast Forward Upgrade
options:
overcloud-ffu-upgrade:
type: Bool
help: |
Fast Forward Upgrade Overcloud
NOTE: Upgrade require overcloud deployment script to be available in home directory of undercloud
user at undercloud node
default: false
fast-and-furious:
type: Bool
help: |
Fast and Furious form of FFU Overcloud
NOTE: Will cause workload outage
default: false
undercloud-ffu-os-upgrade:
type: Bool
help: |
Fast Forward Operating System Upgrade Undercloud (Leapp)
default: false
undercloud-ffu-upgrade:
type: Bool
help: |
Fast Forward Upgrade Undercloud
default: false
undercloud-ffu-releases:
type: ListValue
help: |
Undercloud FFU upgrade releases
default: 14,15,16
undercloud-ffu-repo:
type: Value
help: |
Undercloud FFU upgrade repository method
default: 'rhos-release'
upgrade-ffu-workarounds:
type: Bool
help: |
Apply FFU upgrade workarounds
default: false
overcloud-ffu-releases:
type: ListValue
help: |
Overcloud FFU upgrade releases
default: 14,15,16
overcloud-ffu-repo:
type: Value
help: |
Overcloud FFU upgrade repository method
default: 'rhos-release'
overcloud-ffu-bulk:
type: Bool
help: |
Fast Forward Upgrade all overcloud nodes at once
default: false
overcloud-ffu-compute-rolling:
type: Bool
help: |
Fast Forward Upgrade compute nodes one by one
default: false
overcloud-ffu-replace-env-files:
type: KeyValueList
help: |
A comma-separated list of key/values which describe the environment files
whose content should be replaced during the upgrade phase.
The value must be the path to the new file, while the key must match
the file name to replaced as it shows inside the deploy script.
- title: Set up FFU packages
options:
mirror:
type: Value
help: |
Enable usage of specified mirror (for rpm, pip etc) [brq,qeos,tlv - or hostname].
(Specified mirror needs to proxy multiple rpm source hosts and pypi packages.)
build:
help: |
String represents a timestamp of the OSP puddle.
type: Value

View File

@ -1,57 +0,0 @@
---
galaxy_info:
namespace: openstack
role_name: tripleo_upgrade
author: OpenStack
description: "Manage OpenStack upgrade"
company: Red Hat
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: Apache 2.0
min_ansible_version: 2.8
# Optionally specify the branch Galaxy will use when accessing the GitHub
# repo for this role. During role install, if no tags are available,
# Galaxy will use this branch. During import Galaxy will access files on
# this branch. If travis integration is configured, only notifications for this
# branch will be accepted. Otherwise, in all cases, the repo's default branch
# (usually master) will be used.
# github_branch:
#
# Below are all platforms currently available. Just uncomment
# the ones that apply to your role. If you don't see your
# platform on this list, let us know and we'll get it added!
#
platforms:
- name: EL
versions:
- 7
galaxy_tags:
- system
- redhat
- upgrade
# List tags for your role here, one per line. A tag is
# a keyword that describes and categorizes the role.
# Users find roles by searching for tags. Be sure to
# remove the '[]' above if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of
# alphanumeric characters. Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line.
# Be sure to remove the '[]' above if you add dependencies
# to this list.

View File

@ -1,38 +0,0 @@
---
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
roles:
- name: openstack-operations
scm: git
src: https://opendev.org/openstack/ansible-role-openstack-operations
version: master
trackbranch: master
# ansible-config_template action plugin
- name: config_template
scm: git
src: https://opendev.org/openstack/ansible-config_template
version: master
trackbranch: master
- name: tripleo-ansible
scm: git
src: https://opendev.org/openstack/tripleo-ansible
version: master
trackbranch: master
collections:
- community.general

View File

@ -1,26 +0,0 @@
# Resolve this file's directory so every path below works regardless of the
# caller's current working directory.
export TRIPLEO_UPGRADE_WORKPATH="$(dirname $(readlink -f ${BASH_SOURCE[0]}))"
# Show full task results on stdout for easier debugging.
export ANSIBLE_STDOUT_CALLBACK=debug
# Plugin/module lookup paths: the in-tree molecule plugins plus roles fetched
# into molecule/roles.galaxy (e.g. config_template).
export ANSIBLE_ACTION_PLUGINS="${TRIPLEO_UPGRADE_WORKPATH}/molecule/roles.galaxy/config_template/action:${TRIPLEO_UPGRADE_WORKPATH}/molecule/ansible_plugins/action"
export ANSIBLE_CALLBACK_PLUGINS="${TRIPLEO_UPGRADE_WORKPATH}/molecule/ansible_plugins/callback"
export ANSIBLE_FILTER_PLUGINS="${TRIPLEO_UPGRADE_WORKPATH}/molecule/ansible_plugins/filter"
export ANSIBLE_LIBRARY="${TRIPLEO_UPGRADE_WORKPATH}/molecule/roles.galaxy/config_template/library:${TRIPLEO_UPGRADE_WORKPATH}/molecule/ansible_plugins/modules"
export ANSIBLE_MODULE_UTILS="${TRIPLEO_UPGRADE_WORKPATH}/molecule/ansible_plugins/module_utils"
export ANSIBLE_ROLES_PATH="${TRIPLEO_UPGRADE_WORKPATH}/molecule/roles.galaxy:${TRIPLEO_UPGRADE_WORKPATH}"
export ANSIBLE_INVENTORY="${TRIPLEO_UPGRADE_WORKPATH}/molecule/tests/hosts.ini"
export ANSIBLE_RETRY_FILES_ENABLED="0"
export ANSIBLE_LOAD_CALLBACK_PLUGINS="1"
export ANSIBLE_HOST_KEY_CHECKING=False
# Keep Ansible temp files inside the workspace instead of the user's home.
export ANSIBLE_LOCAL_TEMP="${TRIPLEO_UPGRADE_WORKPATH}/tmp/"
export ANSIBLE_REMOTE_TEMP="${TRIPLEO_UPGRADE_WORKPATH}/tmp/"
# Undo every ANSIBLE_* variable exported above, then remove this helper
# function itself so the shell returns to its pre-activation state.
function unset-ansible-test-env {
    for i in $(env | grep ANSIBLE_ | awk -F'=' '{print $1}'); do
        unset ${i}
    done
    unset TRIPLEO_UPGRADE_WORKPATH
    echo -e "Ansible test environment deactivated.\n"
    unset -f unset-ansible-test-env
}
echo -e "Ansible test environment is now active"
echo -e "Run 'unset-ansible-test-env' to deactivate.\n"

View File

@ -1,3 +0,0 @@
*
*/
!.gitignore

View File

@ -1,37 +0,0 @@
# This file facilitates OpenStack-CI package installation
# before the execution of any tests.
#
# See the following for details:
# - https://docs.openstack.org/infra/bindep/
# - https://opendev.org/opendev/bindep/
#
# Even if the role does not make use of this facility, it
# is better to have this file empty, otherwise OpenStack-CI
# will fall back to installing its default packages which
# will potentially be detrimental to the tests executed.
# The gcc compiler
gcc
# Base requirements for RPM distros
gcc-c++ [platform:rpm]
git [platform:rpm]
libffi-devel [platform:rpm]
openssl-devel [platform:rpm]
python-devel [platform:rpm !platform:rhel-8 !platform:centos-8]
python3-devel [platform:rpm !platform:rhel-7 !platform:centos-7]
PyYAML [platform:rpm !platform:rhel-8 !platform:centos-8]
python3-pyyaml [platform:rpm !platform:rhel-7 !platform:centos-7]
python3-dnf [platform:rpm !platform:rhel-7 !platform:centos-7]
# For SELinux
libselinux-python [platform:rpm !platform:rhel-8 !platform:centos-8]
libsemanage-python [platform:redhat !platform:rhel-8 !platform:centos-8]
libselinux-python3 [platform:rpm !platform:rhel-7 !platform:centos-7]
libsemanage-python3 [platform:redhat !platform:rhel-7 !platform:centos-7]
# Required for compressing collected log files in CI
gzip
# Required to build language docs
gettext

View File

@ -1,37 +0,0 @@
# Molecule managed
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Select the base image, prefixed with a private registry URL when one is set.
{% if item.registry is defined %}
FROM {{ item.registry.url }}/{{ item.image }}
{% else %}
FROM {{ item.image }}
{% endif %}
# Bootstrap python, sudo and bash with whichever package manager the base
# image ships, so Ansible (driven by molecule) can manage the container.
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
    elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash {{ item.pkg_extras | default('') }} && dnf clean all; \
    elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl python-setuptools bash {{ item.pkg_extras | default('') }} && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
    elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml {{ item.pkg_extras | default('') }} && zypper clean -a; \
    elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates {{ item.pkg_extras | default('') }}; \
    elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates {{ item.pkg_extras | default('') }} && xbps-remove -O; fi
{% for pkg in item.easy_install | default([]) %}
# install pip for centos where there is no python-pip rpm in default repos
RUN easy_install {{ pkg }}
{% endfor %}
# Keep the container alive indefinitely so molecule/Ansible can exec into it.
CMD ["sh", "-c", "while true; do sleep 10000; done"]

View File

@ -1,23 +0,0 @@
#!/bin/bash
# -u: fail on undefined variables; -x: trace commands. Deliberately no -e,
# so the deploy's exit status can be captured on the last line.
set -ux
### --start_docs
## Deploying the overcloud
## =======================
## Prepare Your Environment
## ------------------------
## * Source in the undercloud credentials.
## ::
source /home/zuul/stackrc
## * Deploy the overcloud!
## ::
# Extra CLI arguments are forwarded via "$@"; the exit code is recorded in
# status_code instead of aborting the script.
openstack overcloud deploy --override-ansible-cfg /home/zuul/custom_ansible.cfg \
    --templates /usr/share/openstack-tripleo-heat-templates \
    --libvirt-type qemu --timeout 120 --ntp-server 0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org,3.pool.ntp.org -e /home/zuul/cloud-names.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/docker-ha.yaml -e /home/zuul/containers-prepare-parameter.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/docker.yaml -e /usr/share/openstack-tripleo-heat-templates/ci/environments/network/multiple-nics/network-isolation-absolute.yaml -e /usr/share/openstack-tripleo-heat-templates/ci/environments/network/multiple-nics/network-environment.yaml -e /home/zuul/overcloud_network_params.yaml -e /home/zuul/overcloud_storage_params.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/low-memory-usage.yaml -e /home/zuul/src/opendev.org/openstack/tripleo-ci/test-environments/worker-config.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/debug.yaml -e /home/zuul/enable-tls.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/ssl/tls-endpoints-public-ip.yaml -e /home/zuul/inject-trust-anchor.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/disable-telemetry.yaml --validation-errors-nonfatal -e /home/zuul/overcloud-topology-config.yaml -e /home/zuul/overcloud-selinux-config.yaml -e /usr/share/openstack-tripleo-heat-templates/ci/environments/ovb-ha.yaml \
    "$@" && status_code=0 || status_code=$?

View File

@ -1,22 +0,0 @@
# Mock undercloud credentials file (stackrc) used as a molecule test fixture.
# The values below are dummies; the file is only sourced so role code that
# expects a stackrc can run.
# Clear any old environment that may conflict.
for key in $( set | awk -F= '/^OS_/ {print $1}' ); do unset "${key}" ; done
export OS_AUTH_TYPE=password
export OS_PASSWORD=c585670e0f69af8aa55d4e328f1fbdb4a3b1a978
export OS_AUTH_URL=https://192.168.24.2:13000
export OS_USERNAME=admin
export OS_PROJECT_NAME=admin
export COMPUTE_API_VERSION=1.1
export NOVA_VERSION=1.1
export OS_NO_CACHE=True
export OS_CLOUDNAME=undercloud
export OS_IDENTITY_API_VERSION='3'
export OS_PROJECT_DOMAIN_NAME='Default'
export OS_USER_DOMAIN_NAME='Default'
# Add OS_CLOUDNAME to PS1
if [ -z "${CLOUDPROMPT_ENABLED:-}" ]; then
export PS1=${PS1:-""}
export PS1=\${OS_CLOUDNAME:+"(\$OS_CLOUDNAME)"}\ $PS1
export CLOUDPROMPT_ENABLED=1
fi
export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available"

View File

@ -1,753 +0,0 @@
---
CephStorage:
children:
qe-Cloud-0_CephStorage: {}
Compute:
children:
qe-Cloud-0_Compute: {}
ControllerOpenstack:
children:
qe-Cloud-0_ControllerOpenstack: {}
Database:
children:
qe-Cloud-0_Database: {}
Messaging:
children:
qe-Cloud-0_Messaging: {}
Networker:
children:
qe-Cloud-0_Networker: {}
Undercloud:
hosts:
undercloud: {}
vars:
ansible_connection: ssh
ansible_host: localhost
ansible_python_interpreter: /usr/bin/python3
ansible_remote_tmp: /tmp/ansible-${USER}
ansible_ssh_private_key_file: /var/lib/mistral/.ssh/tripleo-admin-rsa
ansible_ssh_user: tripleo-admin
auth_url: https://192.168.24.2:13000
cacert: null
os_auth_token: gAAAAABes3cR-mLfqeB9GWHv-eGb8R7BYR0FaBqP_VLhV5fA7WK_ajYtdZ29-
overcloud_admin_password: ukPnO3gSPheHD0wQ1H6LPTLTe
overcloud_horizon_url: https://10.0.0.101:443/dashboard
overcloud_keystone_url: https://10.0.0.101:13000
plan: qe-Cloud-0
plans:
- qe-Cloud-0
project_name: admin
undercloud_service_list:
- tripleo_nova_compute
- tripleo_heat_engine
- tripleo_ironic_conductor
- tripleo_swift_container_server
- tripleo_swift_object_server
- tripleo_mistral_engine
undercloud_swift_url: https://192.168.24.2:13808/v1/AUTH_783a7dbfeec14726bef2fc60c4044129
username: admin
allovercloud:
children:
qe-Cloud-0_allovercloud: {}
boot_params_service:
children:
qe-Cloud-0_boot_params_service: {}
ca_certs:
children:
qe-Cloud-0_ca_certs: {}
ceph_client:
children:
qe-Cloud-0_ceph_client: {}
ceph_mgr:
children:
qe-Cloud-0_ceph_mgr: {}
ceph_mon:
children:
qe-Cloud-0_ceph_mon: {}
ceph_osd:
children:
qe-Cloud-0_ceph_osd: {}
certmonger_user:
children:
qe-Cloud-0_certmonger_user: {}
chrony:
children:
qe-Cloud-0_chrony: {}
cinder_api:
children:
qe-Cloud-0_cinder_api: {}
cinder_backup:
children:
qe-Cloud-0_cinder_backup: {}
cinder_scheduler:
children:
qe-Cloud-0_cinder_scheduler: {}
cinder_volume:
children:
qe-Cloud-0_cinder_volume: {}
clients:
children:
qe-Cloud-0_clients: {}
clustercheck:
children:
qe-Cloud-0_clustercheck: {}
container_image_prepare:
children:
qe-Cloud-0_container_image_prepare: {}
glance_api:
children:
qe-Cloud-0_glance_api: {}
haproxy:
children:
qe-Cloud-0_haproxy: {}
heat_api:
children:
qe-Cloud-0_heat_api: {}
heat_api_cfn:
children:
qe-Cloud-0_heat_api_cfn: {}
heat_api_cloudwatch_disabled:
children:
qe-Cloud-0_heat_api_cloudwatch_disabled: {}
heat_engine:
children:
qe-Cloud-0_heat_engine: {}
horizon:
children:
qe-Cloud-0_horizon: {}
iscsid:
children:
qe-Cloud-0_iscsid: {}
kernel:
children:
qe-Cloud-0_kernel: {}
keystone:
children:
qe-Cloud-0_keystone: {}
keystone_admin_api:
children:
qe-Cloud-0_keystone_admin_api: {}
keystone_public_api:
children:
qe-Cloud-0_keystone_public_api: {}
logrotate_crond:
children:
qe-Cloud-0_logrotate_crond: {}
memcached:
children:
qe-Cloud-0_memcached: {}
mgrs:
children:
qe-Cloud-0_mgrs: {}
mons:
children:
qe-Cloud-0_mons: {}
mysql:
children:
qe-Cloud-0_mysql: {}
mysql_client:
children:
qe-Cloud-0_mysql_client: {}
neutron_api:
children:
qe-Cloud-0_neutron_api: {}
neutron_plugin_ml2_ovn:
children:
qe-Cloud-0_neutron_plugin_ml2_ovn: {}
nova_api:
children:
qe-Cloud-0_nova_api: {}
nova_compute:
children:
qe-Cloud-0_nova_compute: {}
nova_conductor:
children:
qe-Cloud-0_nova_conductor: {}
nova_libvirt:
children:
qe-Cloud-0_nova_libvirt: {}
nova_libvirt_guests:
children:
qe-Cloud-0_nova_libvirt_guests: {}
nova_metadata:
children:
qe-Cloud-0_nova_metadata: {}
nova_migration_target:
children:
qe-Cloud-0_nova_migration_target: {}
nova_scheduler:
children:
qe-Cloud-0_nova_scheduler: {}
nova_vnc_proxy:
children:
qe-Cloud-0_nova_vnc_proxy: {}
osds:
children:
qe-Cloud-0_osds: {}
oslo_messaging_notify:
children:
qe-Cloud-0_oslo_messaging_notify: {}
oslo_messaging_rpc:
children:
qe-Cloud-0_oslo_messaging_rpc: {}
overcloud:
children:
qe-Cloud-0: {}
ovn_controller:
children:
qe-Cloud-0_ovn_controller: {}
ovn_dbs:
children:
qe-Cloud-0_ovn_dbs: {}
ovn_metadata:
children:
qe-Cloud-0_ovn_metadata: {}
pacemaker:
children:
qe-Cloud-0_pacemaker: {}
podman:
children:
qe-Cloud-0_podman: {}
qe-Cloud-0:
children:
qe-Cloud-0_allovercloud: {}
qe-Cloud-0_CephStorage:
hosts:
ceph-0:
ansible_host: 192.168.24.41
canonical_hostname: ceph-0.redhat.local
ctlplane_hostname: ceph-0.ctlplane.redhat.local
ctlplane_ip: 192.168.24.41
deploy_server_id: 021799e4-0d6e-4764-94c9-695b12ce29b3
storage_hostname: ceph-0.storage.redhat.local
storage_ip: 172.17.3.95
storage_mgmt_hostname: ceph-0.storagemgmt.redhat.local
storage_mgmt_ip: 172.17.4.96
ceph-1:
ansible_host: 192.168.24.37
canonical_hostname: ceph-1.redhat.local
ctlplane_hostname: ceph-1.ctlplane.redhat.local
ctlplane_ip: 192.168.24.37
deploy_server_id: ead956a7-0e7e-45b3-b256-4cc52646b7d7
storage_hostname: ceph-1.storage.redhat.local
storage_ip: 172.17.3.28
storage_mgmt_hostname: ceph-1.storagemgmt.redhat.local
storage_mgmt_ip: 172.17.4.127
ceph-2:
ansible_host: 192.168.24.18
canonical_hostname: ceph-2.redhat.local
ctlplane_hostname: ceph-2.ctlplane.redhat.local
ctlplane_ip: 192.168.24.18
deploy_server_id: 55a996c0-fe7a-48da-a0f8-bea48d4f7dae
storage_hostname: ceph-2.storage.redhat.local
storage_ip: 172.17.3.101
storage_mgmt_hostname: ceph-2.storagemgmt.redhat.local
storage_mgmt_ip: 172.17.4.11
vars:
ansible_ssh_user: tripleo-admin
bootstrap_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
serial: '1'
tripleo_role_name: CephStorage
tripleo_role_networks:
- ctlplane
- storage
- storage_mgmt
qe-Cloud-0_Compute:
hosts:
compute-0:
ansible_host: 192.168.24.13
canonical_hostname: compute-0.redhat.local
ctlplane_hostname: compute-0.ctlplane.redhat.local
ctlplane_ip: 192.168.24.13
deploy_server_id: f4b0c41c-6006-4c22-abcd-cea470087102
internal_api_hostname: compute-0.internalapi.redhat.local
internal_api_ip: 172.17.1.115
storage_hostname: compute-0.storage.redhat.local
storage_ip: 172.17.3.63
tenant_hostname: compute-0.tenant.redhat.local
tenant_ip: 172.17.2.92
compute-1:
ansible_host: 192.168.24.53
canonical_hostname: compute-1.redhat.local
ctlplane_hostname: compute-1.ctlplane.redhat.local
ctlplane_ip: 192.168.24.53
deploy_server_id: 95c5fe03-88f1-417a-926d-089e9b5f9e8e
internal_api_hostname: compute-1.internalapi.redhat.local
internal_api_ip: 172.17.1.147
storage_hostname: compute-1.storage.redhat.local
storage_ip: 172.17.3.112
tenant_hostname: compute-1.tenant.redhat.local
tenant_ip: 172.17.2.61
vars:
ansible_ssh_user: tripleo-admin
bootstrap_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
serial: '1'
tripleo_role_name: Compute
tripleo_role_networks:
- ctlplane
- internal_api
- storage
- tenant
qe-Cloud-0_ControllerOpenstack:
hosts:
controller-0:
ansible_host: 192.168.24.28
canonical_hostname: controller-0.redhat.local
ctlplane_hostname: controller-0.ctlplane.redhat.local
ctlplane_ip: 192.168.24.28
deploy_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
external_hostname: controller-0.external.redhat.local
external_ip: 10.0.0.142
internal_api_hostname: controller-0.internalapi.redhat.local
internal_api_ip: 172.17.1.46
storage_hostname: controller-0.storage.redhat.local
storage_ip: 172.17.3.27
storage_mgmt_hostname: controller-0.storagemgmt.redhat.local
storage_mgmt_ip: 172.17.4.75
tenant_hostname: controller-0.tenant.redhat.local
tenant_ip: 172.17.2.66
controller-1:
ansible_host: 192.168.24.44
canonical_hostname: controller-1.redhat.local
ctlplane_hostname: controller-1.ctlplane.redhat.local
ctlplane_ip: 192.168.24.44
deploy_server_id: 3d4f0662-53db-4934-8307-faa85a4c3cd7
external_hostname: controller-1.external.redhat.local
external_ip: 10.0.0.139
internal_api_hostname: controller-1.internalapi.redhat.local
internal_api_ip: 172.17.1.42
storage_hostname: controller-1.storage.redhat.local
storage_ip: 172.17.3.133
storage_mgmt_hostname: controller-1.storagemgmt.redhat.local
storage_mgmt_ip: 172.17.4.59
tenant_hostname: controller-1.tenant.redhat.local
tenant_ip: 172.17.2.40
controller-2:
ansible_host: 192.168.24.8
canonical_hostname: controller-2.redhat.local
ctlplane_hostname: controller-2.ctlplane.redhat.local
ctlplane_ip: 192.168.24.8
deploy_server_id: 08e2dbc4-d698-44f3-8b01-3e2bb96c2a2c
external_hostname: controller-2.external.redhat.local
external_ip: 10.0.0.137
internal_api_hostname: controller-2.internalapi.redhat.local
internal_api_ip: 172.17.1.41
storage_hostname: controller-2.storage.redhat.local
storage_ip: 172.17.3.57
storage_mgmt_hostname: controller-2.storagemgmt.redhat.local
storage_mgmt_ip: 172.17.4.86
tenant_hostname: controller-2.tenant.redhat.local
tenant_ip: 172.17.2.45
vars:
ansible_ssh_user: tripleo-admin
bootstrap_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
serial: '1'
tripleo_role_name: ControllerOpenstack
tripleo_role_networks:
- ctlplane
- external
- internal_api
- storage
- storage_mgmt
- tenant
qe-Cloud-0_Database:
hosts:
database-0:
ansible_host: 192.168.24.40
canonical_hostname: database-0.redhat.local
ctlplane_hostname: database-0.ctlplane.redhat.local
ctlplane_ip: 192.168.24.40
deploy_server_id: c87f2577-c47f-4bca-b1b2-7f688bb84931
internal_api_hostname: database-0.internalapi.redhat.local
internal_api_ip: 172.17.1.81
database-1:
ansible_host: 192.168.24.30
canonical_hostname: database-1.redhat.local
ctlplane_hostname: database-1.ctlplane.redhat.local
ctlplane_ip: 192.168.24.30
deploy_server_id: 1bfc70d0-98c8-49bc-aab0-76ad5526091b
internal_api_hostname: database-1.internalapi.redhat.local
internal_api_ip: 172.17.1.125
database-2:
ansible_host: 192.168.24.32
canonical_hostname: database-2.redhat.local
ctlplane_hostname: database-2.ctlplane.redhat.local
ctlplane_ip: 192.168.24.32
deploy_server_id: 7758cf0f-4ae6-44c9-9c78-37ad2baf28ee
internal_api_hostname: database-2.internalapi.redhat.local
internal_api_ip: 172.17.1.63
vars:
ansible_ssh_user: tripleo-admin
bootstrap_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
serial: '1'
tripleo_role_name: Database
tripleo_role_networks:
- ctlplane
- internal_api
qe-Cloud-0_Messaging:
hosts:
messaging-0:
ansible_host: 192.168.24.36
canonical_hostname: messaging-0.redhat.local
ctlplane_hostname: messaging-0.ctlplane.redhat.local
ctlplane_ip: 192.168.24.36
deploy_server_id: a5cbac81-462f-4bae-a43d-f3e207359c5c
internal_api_hostname: messaging-0.internalapi.redhat.local
internal_api_ip: 172.17.1.30
messaging-1:
ansible_host: 192.168.24.38
canonical_hostname: messaging-1.redhat.local
ctlplane_hostname: messaging-1.ctlplane.redhat.local
ctlplane_ip: 192.168.24.38
deploy_server_id: 608601da-5f38-4512-8856-7f325eef4eba
internal_api_hostname: messaging-1.internalapi.redhat.local
internal_api_ip: 172.17.1.117
messaging-2:
ansible_host: 192.168.24.17
canonical_hostname: messaging-2.redhat.local
ctlplane_hostname: messaging-2.ctlplane.redhat.local
ctlplane_ip: 192.168.24.17
deploy_server_id: 0224f4a5-20a8-403d-90ca-78aaca3e1f16
internal_api_hostname: messaging-2.internalapi.redhat.local
internal_api_ip: 172.17.1.23
vars:
ansible_ssh_user: tripleo-admin
bootstrap_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
serial: '1'
tripleo_role_name: Messaging
tripleo_role_networks:
- ctlplane
- internal_api
qe-Cloud-0_Networker:
hosts:
networker-0:
ansible_host: 192.168.24.50
canonical_hostname: networker-0.redhat.local
ctlplane_hostname: networker-0.ctlplane.redhat.local
ctlplane_ip: 192.168.24.50
deploy_server_id: 1a2dee31-0c1d-4769-91c8-ff9bcb355ff2
internal_api_hostname: networker-0.internalapi.redhat.local
internal_api_ip: 172.17.1.82
tenant_hostname: networker-0.tenant.redhat.local
tenant_ip: 172.17.2.31
networker-1:
ansible_host: 192.168.24.35
canonical_hostname: networker-1.redhat.local
ctlplane_hostname: networker-1.ctlplane.redhat.local
ctlplane_ip: 192.168.24.35
deploy_server_id: b9fafe0f-f3b8-4b75-9e7a-35f844ea8ffa
internal_api_hostname: networker-1.internalapi.redhat.local
internal_api_ip: 172.17.1.133
tenant_hostname: networker-1.tenant.redhat.local
tenant_ip: 172.17.2.18
vars:
ansible_ssh_user: tripleo-admin
bootstrap_server_id: 662ffaba-c0ee-4113-b1ef-35d5f3ef29d7
serial: '1'
tripleo_role_name: Networker
tripleo_role_networks:
- ctlplane
- internal_api
- tenant
qe-Cloud-0_allovercloud:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_boot_params_service:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_ca_certs:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_ceph_client:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_ceph_mgr:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_ceph_mon:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_ceph_osd:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_certmonger_user:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_chrony:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_cinder_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_cinder_backup:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_cinder_scheduler:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_cinder_volume:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_clients:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_clustercheck:
children:
qe-Cloud-0_Database: {}
qe-Cloud-0_container_image_prepare:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_glance_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_haproxy:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_heat_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_heat_api_cfn:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_heat_api_cloudwatch_disabled:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_heat_engine:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_horizon:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_iscsid:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_kernel:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_keystone:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_keystone_admin_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_keystone_public_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_logrotate_crond:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_memcached:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_mgrs:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_mons:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_mysql:
children:
qe-Cloud-0_Database: {}
qe-Cloud-0_mysql_client:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_neutron_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_neutron_plugin_ml2_ovn:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_nova_api:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_nova_compute:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_nova_conductor:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_nova_libvirt:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_nova_libvirt_guests:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_nova_metadata:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_nova_migration_target:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_nova_scheduler:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_nova_vnc_proxy:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_osds:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_oslo_messaging_notify:
children:
qe-Cloud-0_Messaging: {}
qe-Cloud-0_oslo_messaging_rpc:
children:
qe-Cloud-0_Messaging: {}
qe-Cloud-0_overcloud:
children:
qe-Cloud-0_allovercloud: {}
qe-Cloud-0_ovn_controller:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_ovn_dbs:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_ovn_metadata:
children:
qe-Cloud-0_Compute: {}
qe-Cloud-0_pacemaker:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_podman:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_qe-Cloud-0:
children:
qe-Cloud-0_allovercloud: {}
qe-Cloud-0_redis:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_snmp:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_sshd:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_swift_proxy:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_swift_ringbuilder:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_swift_storage:
children:
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_timezone:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_tripleo_firewall:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_tripleo_packages:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
qe-Cloud-0_tuned:
children:
qe-Cloud-0_CephStorage: {}
qe-Cloud-0_Compute: {}
qe-Cloud-0_ControllerOpenstack: {}
qe-Cloud-0_Database: {}
qe-Cloud-0_Messaging: {}
qe-Cloud-0_Networker: {}
redis:
children:
qe-Cloud-0_redis: {}
snmp:
children:
qe-Cloud-0_snmp: {}
sshd:
children:
qe-Cloud-0_sshd: {}
swift_proxy:
children:
qe-Cloud-0_swift_proxy: {}
swift_ringbuilder:
children:
qe-Cloud-0_swift_ringbuilder: {}
swift_storage:
children:
qe-Cloud-0_swift_storage: {}
timezone:
children:
qe-Cloud-0_timezone: {}
tripleo_firewall:
children:
qe-Cloud-0_tripleo_firewall: {}
tripleo_packages:
children:
qe-Cloud-0_tripleo_packages: {}
tuned:
children:
qe-Cloud-0_tuned: {}

View File

@ -1,51 +0,0 @@
---
driver:
name: delegated
options:
managed: false
login_cmd_template: >-
ssh
-o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
-o Compression=no
-o TCPKeepAlive=yes
-o VerifyHostKeyDNS=no
-o ForwardX11=no
-o ForwardAgent=no
{instance}
ansible_connection_options:
ansible_connection: ssh
log: true
platforms:
- name: instance
provisioner:
name: ansible
config_options:
defaults:
fact_caching: jsonfile
fact_caching_connection: /tmp/molecule/facts
inventory:
hosts:
all:
hosts:
instance:
ansible_host: localhost
log: true
env:
ANSIBLE_STDOUT_CALLBACK: yaml
ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${TRIPLEO_UPGRADE_WORKPATH}/roles.galaxy/tripleo-ansible/tripleo-ansible/tripleo_ansible/roles:${HOME}/zuul-jobs/roles"
scenario:
test_sequence:
- prepare
- converge
lint: |
flake8
verifier:
name: testinfra

View File

@ -1,27 +0,0 @@
---
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
- name: Converge
hosts: all
vars:
use_oooq: true
upgrade_noop: true
update_noop: true
ffu_noop: true
overcloud_stack_name: "qe-Cloud-0"
roles:
- tripleo-upgrade

View File

@ -1,65 +0,0 @@
---
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
- name: Prepare
hosts: all
become: true
gather_facts: true
pre_tasks:
- name: set basic user fact
set_fact:
ansible_user: "{{ lookup('env', 'USER') }}"
when:
- ansible_user is undefined
- name: set basic home fact
set_fact:
ansible_user_dir: "/home/{{ ansible_user }}"
- name: Set project path fact
set_fact:
tripleo_upgrade_project_path: "{{ ansible_user_dir }}/{{ zuul.project.src_dir | default(lookup('env', 'ZUUL_PROJECT_SRC_DIR')) }}"
when:
- tripleo_upgrade_project_path is undefined
roles:
- role: test_deps
post_tasks:
- name: "Copy dummy files into {{ ansible_user_dir }}"
copy:
src: "{{ tripleo_upgrade_project_path }}/molecule/default/mock_files/{{ item }}"
dest: "/home/{{ ansible_user }}/{{ item }}"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
loop:
- 'overcloud_deploy.sh'
- 'stackrc'
- name: Ensures config-download directory exists
file:
path: "/home/{{ ansible_user }}/overcloud-deploy/qe-Cloud-0/config-download/qe-Cloud-0"
state: directory
- name: "Copy inventory to expected location"
copy:
src: "{{ tripleo_upgrade_project_path }}/molecule/default/mock_files/tripleo-ansible-inventory.yaml"
dest: "/home/{{ ansible_user }}/overcloud-deploy/qe-Cloud-0/config-download/qe-Cloud-0/tripleo-ansible-inventory.yaml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'

View File

@ -1,13 +0,0 @@
# this is required for the molecule jobs
ansible-core
ansi2html
docker
dogpile.cache>=0.9.2 # MIT
pytest
pytest-cov
pytest-html
pytest-xdist
mock
sh>=1.12.14,<1.13
molecule>=3.3.4
netaddr

View File

@ -1,3 +0,0 @@
*
*/
!.gitignore

View File

@ -1,45 +0,0 @@
#!/usr/bin/env bash
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Install the distribution packages listed in the project's bindep file.
# BINDEP_FILE may be exported by the caller; it defaults to ../bindep.txt
# relative to this script.

## Shell Opts ----------------------------------------------------------------
set -o pipefail
# NOTE: was 'set -xeuo' — the trailing '-o' had no argument, which makes bash
# print the current option settings instead of enabling anything.
set -xeu

## Vars ----------------------------------------------------------------------
export BINDEP_FILE="${BINDEP_FILE:-$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")/../bindep.txt}"

## Main ----------------------------------------------------------------------
# Source distribution information (ID is used below to pick a package manager).
source /etc/os-release || source /usr/lib/os-release
RHT_PKG_MGR=$(command -v dnf || command -v yum)

# NOTE(cloudnull): Get a list of packages to install with bindep. If packages
#                  need to be installed, bindep exits with an exit code of 1,
#                  hence the '|| true'.
BINDEP_PKGS=$(bindep -b -f "${BINDEP_FILE}" test || true)

# Use an arithmetic comparison: inside [[ ]], '>' is a lexicographic string
# comparison, not numeric.
if [[ ${#BINDEP_PKGS} -gt 0 ]]; then
    case "${ID,,}" in
        amzn|rhel|centos|fedora)
            # BINDEP_PKGS is intentionally unquoted: it is a space-separated
            # package list that must word-split into individual arguments.
            sudo "${RHT_PKG_MGR}" install -y ${BINDEP_PKGS}
            ;;
    esac
fi

View File

@ -1,71 +0,0 @@
#!/usr/bin/env bash
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Bootstrap a local virtualenv, install the project's requirements, and run
# the zuul job playbooks against this host to reproduce the CI job locally.

## Shell Opts ----------------------------------------------------------------
set -o pipefail
# NOTE: was 'set -xeuo' — the trailing '-o' had no argument, which makes bash
# print the current option settings instead of enabling anything.
set -xeu

## Vars ----------------------------------------------------------------------
export PROJECT_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")/../../"
export TRIPLEO_JOB_ANSIBLE_ARGS=${TRIPLEO_JOB_ANSIBLE_ARGS:-"-v"}

## Main ----------------------------------------------------------------------
# Source distribution information (ID is used below to pick a package manager).
source /etc/os-release || source /usr/lib/os-release
RHT_PKG_MGR=$(command -v dnf || command -v yum)
PYTHON_EXEC=$(command -v python3 || command -v python)

# Install the one requirement we need to run any local test
case "${ID,,}" in
    amzn|rhel|centos|fedora)
        sudo "${RHT_PKG_MGR}" install -y python*-virtualenv
        ;;
esac

# Create a virtual env
"${PYTHON_EXEC}" -m virtualenv --system-site-packages "${HOME}/test-python"

# Run bindep
"${HOME}/test-python/bin/pip" install pip setuptools bindep --upgrade
"${PROJECT_DIR}/molecule/scripts/bindep-install"

# Install local requirements; drop any stale wheel cache first so locally
# built wheels do not shadow the pinned requirements.
if [[ -d "${HOME}/.cache/pip/wheels" ]]; then
    rm -rf "${HOME}/.cache/pip/wheels"
fi
"${HOME}/test-python/bin/pip" install \
    -r "${PROJECT_DIR}/requirements.txt" \
    -r "${PROJECT_DIR}/test-requirements.txt" \
    -r "${PROJECT_DIR}/molecule/molecule-requirements.txt"

# Run local test. PS1 must be set before 'activate' is sourced because the
# activate script edits the prompt and we run with nounset enabled.
PS1="[\u@\h \W]\$" source "${HOME}/test-python/bin/activate"
source "${PROJECT_DIR}/molecule/ansible-test-env.rc"
export ANSIBLE_ROLES_PATH="${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles"
echo "------------"
ansible-playbook -i "${PROJECT_DIR}/molecule/tests/hosts.ini" \
    -e "tripleo_src=$(realpath --relative-to="${HOME}" "${PROJECT_DIR}")" \
    -e "tripleo_job_ansible_args='${TRIPLEO_JOB_ANSIBLE_ARGS}'" \
    -e "ansible_user=${USER}" \
    -e "ansible_user_dir=${HOME}" \
    "${PROJECT_DIR}/zuul.d/playbooks/run-local.yml" \
    "${PROJECT_DIR}/zuul.d/playbooks/prepare-test-host.yml" \
    "${PROJECT_DIR}/zuul.d/playbooks/pre.yml" \
    "${PROJECT_DIR}/zuul.d/playbooks/run.yml"

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pytest_addoption(parser):
    """Register molecule-related command line options with pytest.

    :param parser: the pytest option parser (anything exposing ``addoption``).
    """
    option_specs = (
        ('--scenario', 'scenario setting'),
        ('--ansible-args', 'ansible args passed into test runner.'),
    )
    for flag, description in option_specs:
        parser.addoption(flag, help=description)

View File

@ -1 +0,0 @@
test ansible_connection=local ansible_host=localhost

View File

@ -1,43 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
def test_molecule(pytestconfig):
    """Drive molecule through its CLI, honoring --scenario / --ansible-args.

    With ``--ansible-args`` set, runs ``molecule converge`` (forwarding the
    extra args after ``--``) and tears down with ``molecule destroy`` in all
    cases; otherwise runs ``molecule test`` for one scenario or ``--all``.
    Fails the test if molecule exits non-zero.
    """
    scenario = pytestconfig.getoption("scenario")
    ansible_args = pytestconfig.getoption("ansible_args")

    command = ['python', '-m', 'molecule', '--debug']
    if ansible_args:
        command.append('converge')
        if scenario:
            command += ['--scenario-name', scenario]
        command += ['--'] + ansible_args.split()
    elif scenario:
        command += ['test', '--scenario-name', scenario]
    else:
        command += ['test', '--all']

    try:
        assert subprocess.call(command) == 0
    finally:
        # converge leaves the instance up; always destroy it afterwards.
        if ansible_args:
            teardown = ['python', '-m', 'molecule', 'destroy']
            if scenario:
                teardown += ['--scenario-name', scenario]
            subprocess.call(teardown)

View File

@ -1,4 +0,0 @@
---
features:
- |
Add in reno support for managing releasenotes.

View File

@ -1,274 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tripleo-upgrade Release Notes'
copyright = '2017, TripleO Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tripleo-upgradeReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'tripleo-upgradeReleaseNotes.tex',
'tripleo-upgrade Release Notes Documentation',
'2016, TripleO Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tripleo-upgradereleasenotes',
'tripleo-upgrade Release Notes Documentation',
['2016, TripleO Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tripleo-upgradeReleaseNotes',
'tripleo-upgrade Release Notes Documentation',
'2016, TripleO Developers', 'tripleo-upgradeReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/tripleo-upgrade'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'tripleo'
openstackdocs_bug_tag = 'documentation'

View File

@ -1,27 +0,0 @@
================================================
Welcome to tripleo-upgrade Release Notes!
================================================
Contents
========
.. toctree::
:maxdepth: 2
unreleased
zed
wallaby
victoria
ussuri
train
stein
rocky
queens
pike
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

View File

@ -1,6 +0,0 @@
===================================
Pike Series Release Notes
===================================
.. release-notes::
:branch: stable/pike

View File

@ -1,6 +0,0 @@
===================================
Queens Series Release Notes
===================================
.. release-notes::
:branch: stable/queens

View File

@ -1,6 +0,0 @@
===================================
Rocky Series Release Notes
===================================
.. release-notes::
:branch: stable/rocky

View File

@ -1,6 +0,0 @@
===================================
Stein Series Release Notes
===================================
.. release-notes::
:branch: stable/stein

View File

@ -1,6 +0,0 @@
==========================
Train Series Release Notes
==========================
.. release-notes::
:branch: stable/train

View File

@ -1,5 +0,0 @@
==============================
Current Series Release Notes
==============================
.. release-notes::

View File

@ -1,6 +0,0 @@
===========================
Ussuri Series Release Notes
===========================
.. release-notes::
:branch: stable/ussuri

View File

@ -1,6 +0,0 @@
=============================
Victoria Series Release Notes
=============================
.. release-notes::
:branch: stable/victoria

View File

@ -1,6 +0,0 @@
============================
Wallaby Series Release Notes
============================
.. release-notes::
:branch: stable/wallaby

View File

@ -1,6 +0,0 @@
========================
Zed Series Release Notes
========================
.. release-notes::
:branch: stable/zed

View File

@ -1,2 +0,0 @@
pbr>=1.6 # Apache-2.0
setuptools>=50.3.0 # MIT License

View File

@ -1,39 +0,0 @@
# Packaging metadata consumed by pbr (see setup.py: pbr=True reads this file).
[metadata]
name = tripleo-upgrade
summary = tripleo-upgrade - An ansible role for upgrade and update a TripleO deployment
description_file =
    README.rst
author = TripleO Team
author_email = openstack-discuss@lists.openstack.org
home_page = https://opendev.org/openstack/tripleo-upgrade
classifier =
    License :: OSI Approved :: Apache Software License
    Development Status :: 4 - Beta
    Intended Audience :: Developers
    Intended Audience :: System Administrators
    Intended Audience :: Information Technology
    Topic :: Utilities

[global]
setup-hooks =
    pbr.hooks.setup_hook

# Install the role content under the system-wide Ansible roles path so the
# role is usable without a galaxy install.
[files]
data_files =
    usr/local/share/ansible/roles/tripleo-upgrade/defaults = defaults/*
    usr/local/share/ansible/roles/tripleo-upgrade/handlers = handlers/*
    usr/local/share/ansible/roles/tripleo-upgrade/meta = meta/*
    usr/local/share/ansible/roles/tripleo-upgrade/tasks = tasks/*
    usr/local/share/ansible/roles/tripleo-upgrade/templates = templates/*
    usr/local/share/ansible/roles/tripleo-upgrade/tests = tests/*
    usr/local/share/ansible/roles/tripleo-upgrade/vars = vars/*
    usr/local/share/ansible/roles/tripleo-upgrade/files = files/*
    usr/local/share/ansible/roles/tripleo-upgrade/filter_plugins = filter_plugins/*
    playbooks = playbooks/*

[wheel]
universal = 1

# AUTHORS/ChangeLog are not generated from git history.
[pbr]
skip_authors = True
skip_changelog = True

View File

@ -1,20 +0,0 @@
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from setuptools import setup

# All packaging metadata lives in setup.cfg; pbr=True delegates to pbr,
# which parses it at build time. No python modules are shipped.
setup(
    setup_requires=['pbr'],
    py_modules=[],
    pbr=True,
)

View File

@ -1,41 +0,0 @@
---
# Rewrite the roles data file so Pacemaker-managed roles update one node at a
# time (update_serial: 1) while all other roles may update 25 nodes in
# parallel. By default we have update_serial either unset at all for OSP13 and
# OSP14 or set to 1 for Pacemaker/CephOSD/Networker nodes; running those in
# parallel is acceptable for systems that tested enough on preprod or can take
# a small outage, and serializing them in CI just wastes time.
- name: Read the existing roles data file
  slurp:
    src: "{{ roles_data }}"
  register: _roles_data_slurp

- name: Transform the roles data file update_serial values
  vars:
    old_roles_data: "{{ _roles_data_slurp['content'] | b64decode | from_yaml }}"
  # json_query requires the jmespath python library on the control node.
  when: "'OS::TripleO::Services::Pacemaker' in old_roles_data | json_query('[].ServicesDefault[]')"
  block:
    - name: Backup original roles data file
      copy:
        src: "{{ roles_data }}"
        # '\.yaml$' (escaped dot) so only a literal ".yaml" suffix is stripped.
        dest: "{{ roles_data | regex_replace('\\.yaml$') }}.original.yaml"
        remote_src: true
        force: false

    - name: Write new roles data file with revised update_serial
      copy:
        content: >-
          {%- set new_data = [] %}
          {%- for data_item in old_roles_data %}
          {#- BUG FIX: the roles-data key is 'ServicesDefault'; the original
              tested 'ServiceDefault' so update_serial was never injected. #}
          {%- if 'ServicesDefault' in data_item %}
          {%- if 'OS::TripleO::Services::Pacemaker' in data_item['ServicesDefault'] %}
          {%- set _ = data_item.update({'update_serial': 1}) %}
          {%- else %}
          {%- set _ = data_item.update({'update_serial': 25}) %}
          {%- endif %}
          {%- endif %}
          {%- set _ = new_data.append(data_item) %}
          {%- endfor %}
          {{- new_data | to_nice_yaml(indent=2) }}
        dest: "{{ roles_data }}"

View File

@ -1,127 +0,0 @@
---
# Collect facts about the existing overcloud deployment (enabled services,
# registry network, hostnames, deploy-script arguments) that later upgrade
# stages depend on. Task order matters: each set_fact consumes the register
# of the task right before it.

# rc == 0 when openstack-tripleo-heat-templates carries an .elN. dist tag;
# used below to distinguish upstream/RDO builds from downstream ones.
- name: register is_upstream variable
  shell: |
    rpm -q openstack-tripleo-heat-templates | grep '\.el[[:digit:]]\.'
  failed_when: false
  register: is_upstream

# With a heat stack present, read EnabledServices from the stack outputs.
- name: get enabled services for each role from stack output
  shell: |
    source {{ undercloud_rc }}
    openstack stack output show -f yaml {{ overcloud_stack_name }} EnabledServices -c output_value
  register: enabled_services_out
  when:
    - not mock_environment|bool
    - not ephemeral_heat|bool

# With ephemeral heat there is no stack to query; read the same data from the
# stack export file instead, shaped to match the stack-output YAML above.
- name: get enabled services for each role from stack export
  shell: |
    import yaml
    e = yaml.safe_load(open("{{ working_dir }}/overcloud-deploy/{{ overcloud_stack_name }}/{{ overcloud_stack_name }}-export.yaml"))
    print(yaml.dump(dict(output_value=dict(all=e['parameter_defaults']['AllNodesExtraMapData']['enabled_services']))))
  args:
    executable: /usr/bin/python3
  register: enabled_services_out
  when:
    - not mock_environment|bool
    - ephemeral_heat|bool

# Falls back to an empty mapping in mock environments where neither task ran.
- name: set enabled_services fact
  vars:
    default_stdout:
      output_value: {}
  set_fact:
    enabled_services: "{{ dict(enabled_services_out.get('stdout', default_stdout)|from_yaml).get('output_value') }}"

- name: set deployment_with_ha and deployment_with_ovs fact
  set_fact:
    deployment_with_ha_upstream: "{{ is_upstream.rc == 0 and ('pacemaker' in enabled_services|string) }}"
    deployment_with_ovs: "{{ 'neutron_ovs_agent' in enabled_services|string }}"
    deployment_with_ovn: "{{ 'neutron_plugin_ml2_ovn' in enabled_services| string }}"

# True when any role runs a ceph daemon (osd/mgr/mon).
- name: set ceph_osd_enabled fact
  set_fact:
    ceph_osd_enabled: "{{ enabled_services.values() | list | flatten | intersect(['ceph_osd','ceph_mgr','ceph_mon']) | length > 0 | bool }}"

# Same stack-output vs export-file split as for enabled services, selected
# inline via Jinja; the executable matches the branch taken.
- name: get container registry network
  shell: |
    {% if ephemeral_heat | bool %}
    import yaml
    e = yaml.safe_load(open("{{ working_dir }}/overcloud-deploy/{{ overcloud_stack_name }}/{{ overcloud_stack_name }}-export.yaml"))
    print(e['parameter_defaults']['AllNodesExtraMapData']['docker_registry_network'])
    {% else %}
    source {{ undercloud_rc }}
    openstack stack output show -f yaml {{ overcloud_stack_name }} DockerRegistryNetwork -c output_value
    {% endif %}
  args:
    executable: "{{ (ephemeral_heat | bool) | ternary('/usr/bin/python3', '/bin/bash') }}"
  register: docker_registry_network_out
  when:
    - not mock_environment|bool

- name: set container registry network fact
  set_fact:
    docker_registry_network: "{{ docker_registry_network_out.get('stdout', '') }}"

- name: get undercloud short host name
  command: hostname -s
  register: undercloud_short_host_name

# Replaces the register dict with its plain stdout string.
- name: set undercloud short host name fact
  set_fact:
    undercloud_short_host_name: "{{ undercloud_short_host_name.stdout }}"

- name: get undercloud DNS domain name
  command: hostname -d
  register: undercloud_domain

- name: set undercloud DNS domain name fact
  set_fact:
    undercloud_domain: "{{ undercloud_domain.stdout }}"

# Scrape the original deploy script for the -n/--networks-file argument.
- name: get network-data-file
  shell: |
    awk '/-n\s/ || /--networks-file\s/ {gsub(/[[:space:]]/, "", $2); print $2}' {{ overcloud_deploy_script }}
  register: network_data_file

- name: set network_data_file fact
  set_fact:
    network_data_file: "{{ network_data_file.stdout }}"

- name: get env files used during deploy
  shell: |
    awk '/-e\s|--environment-file\s/ {gsub(/[[:space:]]/, "", $2); print $2}' {{ overcloud_deploy_script }}
  register: initial_env_file

# NOTE(review): this assigns the register to itself (fact keeps the whole
# command-result dict, incl. stdout_lines used below) — effectively a no-op.
- name: set initial_env_file fact
  set_fact:
    initial_env_file: "{{ initial_env_file }}"

- name: get network environment fact for FFWD3
  set_fact:
    network_environment_file: "{{initial_env_file.stdout_lines | select('search','network-environment')| first}}"
  when: initial_env_file.stdout_lines | select('search','network-environment') | list | count > 0

- name: fetch roles-data file referenced in initial deploy script
  shell: |
    awk '/-r\s|--roles-file\s/ {gsub(/[[:space:]]/, "", $2); print $2}' {{ overcloud_deploy_script }}
  register: roles_data_file

# Fall back to default_roles_data when the deploy script used no -r option.
- name: set roles-data fact
  vars:
    roles_data: "{{ roles_data_file.stdout }}"
  set_fact:
    roles_data: "{{ (roles_data_file.stdout) | ternary(roles_data, default_roles_data) }}"
    custom_roles_used: "{{ (roles_data_file.stdout) | ternary(true, false) }}"

- name: check if bm nodes are present
  shell: |
    source {{ undercloud_rc }}
    openstack baremetal node list -f value -c UUID
  when: not mock_environment|bool
  register: bm_node_present

# No ironic baremetal nodes registered => split-stack deployment.
- name: set fact splitstack_deployment
  set_fact:
    splitstack_deployment: "{{ (bm_node_present.get('stdout_lines', [])) | ternary(false, true) | default(false) }}"

View File

@ -1,5 +0,0 @@
---
# Render extra Heat environment parameters into the deployment files dir.
# NOTE(review): 'install.deployment.files' is an infrared-style variable —
# presumably a path whose basename is the local templates dir; confirm
# against the calling plugin.
- name: inject config_heat_extra.yaml
  copy:
    dest: "{{ install.deployment.files | basename }}/config_heat_extra.yaml"
    content: "{{ config_heat_extra_yaml | to_nice_yaml }}"

View File

@ -1,123 +0,0 @@
---
# Prepare undercloud.conf for the undercloud upgrade: point it at the
# container image prepare file, migrate docker_* registry options to the
# container_* equivalents, and ensure a hieradata override forcing the
# file glance backend.
# NOTE(review): the original indentation was lost in this listing; the
# membership of tasks in the first anonymous block (and any 'when' it may
# have carried, e.g. on custom_uc_containers.stat.exists) should be
# confirmed against the original file.
- name: check customized {{ uc_containers_prepare_file }} exists
  stat:
    path: "{{ working_dir }}/{{ uc_containers_prepare_file }}"
  register: custom_uc_containers

# Each of the three awk probes prints the current value (if any) of an
# undercloud.conf option; failed_when: false because a missing option is fine.
- name: check hieradata_override parameter already defined in {{ undercloud_conf }}
  shell: |
    awk -F '=' '/^hieradata_override/ {gsub(/[[:space:]]/, "", $2); print $2 }' {{ undercloud_conf }}
  failed_when: false
  register: defined_hieradata_override

- name: check docker_insecure_registries already defined in {{ undercloud_conf }}
  shell: |
    awk -F '=' '/^docker_insecure_registries/ {gsub(/[[:space:]]/, "", $2); print $2 }' {{ undercloud_conf }}
  failed_when: false
  register: defined_insecure_registry

- name: check container_insecure_registries already defined in {{ undercloud_conf }}
  shell: |
    awk -F '=' '/^container_insecure_registries/ {gsub(/[[:space:]]/, "", $2); print $2 }' {{ undercloud_conf }}
  failed_when: false
  register: defined_container_registry

- name: set container_cli for undercloud
  ini_file:
    path: "{{ undercloud_conf }}"
    section: DEFAULT
    option: container_cli
    value: "{{ undercloud_container_cli }}"
  when: undercloud_container_cli is defined

- name: set undercloud_enable_paunch for undercloud
  ini_file:
    path: "{{ undercloud_conf }}"
    section: DEFAULT
    option: undercloud_enable_paunch
    value: "{{ undercloud_enable_paunch }}"
  when: undercloud_enable_paunch is defined

- block:
    - name: set containers file for undercloud
      ini_file:
        path: "{{ undercloud_conf }}"
        section: DEFAULT
        option: container_images_file
        value: "{{ working_dir }}/{{ uc_containers_prepare_file }}"

    - name: get namespaces from {{ uc_containers_prepare_file }}
      slurp:
        src: "{{ uc_containers_prepare_file }}"
      register: prep_param_raw

    # Registry host parts of the ceph and main image namespaces.
    - name: set namespaces
      set_fact:
        ceph_namespace: "{{ (prep_param_raw.content|b64decode|from_yaml).parameter_defaults.ContainerImagePrepare[0].set.ceph_namespace.split('/')[0] }}"
        full_namespace: "{{ (prep_param_raw.content|b64decode|from_yaml).parameter_defaults.ContainerImagePrepare[0].set.namespace }}"

    - name: set container_insecure_registries for undercloud upgrade
      ini_file:
        path: "{{ undercloud_conf }}"
        section: DEFAULT
        option: container_insecure_registries
        value: "{{ full_namespace.split('/')[0] }},{{ ceph_namespace }}"
      when: defined_container_registry.stdout_lines|length == 0

    # NOTE(review): the vars/when of this task and the next one look swapped
    # relative to their names (this one merges the legacy docker_* value,
    # the next merges the existing container_* value) — confirm intent.
    - name: adjust existing container_insecure_registries for undercloud upgrade
      vars:
        reg_list: '{{ defined_insecure_registry.stdout.split(",")|union([full_namespace.split("/")[0],ceph_namespace])|unique|join(",") }}'
      replace:
        path: "{{ undercloud_conf }}"
        regexp: '^(container_insecure_registries\s*=)(.*)'
        replace: '\1 {{ reg_list }}'
      when:
        - defined_insecure_registry.stdout_lines|length != 0
        - full_namespace is defined
        - ceph_namespace is defined

    - name: adjust existing container_insecure_registries for undercloud upgrade from docker_insecure_registries
      vars:
        reg_list: '{{ defined_container_registry.stdout.split(",")|union([full_namespace.split("/")[0],ceph_namespace])|unique|join(",") }}'
      replace:
        path: "{{ undercloud_conf }}"
        regexp: '^(container_insecure_registries\s*=)(.*)'
        replace: '\1 {{ reg_list }}'
      when:
        - defined_container_registry.stdout_lines|length != 0
        - full_namespace is defined
        - ceph_namespace is defined

    # The docker_* spelling is obsolete once container_* is in place.
    - name: remove docker_insecure_registries from undercloud config file if present
      ini_file:
        path: "{{ undercloud_conf }}"
        section: DEFAULT
        option: docker_insecure_registries
        state: absent

- name: set hieradata_override to {{ undercloud_conf }} when not existing
  block:
    - name: Create the hieradata_override file with the basic layout
      copy:
        dest: "{{ undercloud_hiera }}"
        content: |
          parameter_defaults:
            UndercloudExtraConfig:
              glance_backend: 'file'
    - name: Configure hieradata_override parameter
      ini_file:
        path: "{{ undercloud_conf }}"
        section: DEFAULT
        option: hieradata_override
        value: "{{ undercloud_hiera }}"
  # This means that hieradata_override is empty
  when: defined_hieradata_override.stdout_lines|length == 0

- name: modify hieradata_override configuration when defined in {{ undercloud_conf }}
  replace:
    path: "{{ defined_hieradata_override.stdout }}"
    regexp: 'glance_backend(.*)'
    replace: "glance_backend: 'file'"
  when: defined_hieradata_override.stdout_lines|length != 0

View File

@ -1,16 +0,0 @@
---
# Render the per-service post-upgrade health-check scripts for one controller
# node (node_name is supplied by the including task's loop_var).
- name: create a directory to store post scripts for controller nodes
  file:
    path: "{{ working_dir }}/{{ node_name | splitext | first }}_post"
    state: directory

- name: create post scripts for {{ node_name }}
  template:
    src: "check_service_{{ item }}.sh.j2"
    dest: "{{ working_dir }}/{{ node_name | splitext | first }}_post/{{ item }}.sh"
    # Quoted: a bare 0775 is parsed as an octal int by YAML and flagged by
    # ansible-lint (risky-octal); the string form is the documented idiom.
    mode: "0775"
  loop:
    - 'haproxy'
    - 'haproxy_backend'
    - 'rabbitmq'
    - 'redis'

View File

@ -1,5 +0,0 @@
---
# Generate the post-upgrade check scripts once per node of the controller
# role; controller_post_script.yml reads each node via 'node_name'.
- include_tasks: controller_post_script.yml
  loop_control:
    loop_var: node_name
  loop: "{{ inventory_hostmap[controller_role_name] }}"

View File

@ -1,116 +0,0 @@
---
# Strip deprecated scale/flavor/ntp CLI options from the overcloud deploy
# script and convert them into heat parameters (cli_opts_params.yaml).
# Pattern for each option: grep the script (ignore_errors because grep exits
# non-zero on no match), then remove the matched text only when the grep
# succeeded ('is succeeded' is false for rc != 0). Do not replace
# ignore_errors with failed_when: false here — that would make every
# 'is succeeded' check true and re-add options that were never present.
- name: register control-scale opts
  shell: |
    grep -oP "control-scale\ \d+" {{ overcloud_deploy_script }}
  register: control_scale
  ignore_errors: true

- name: remove control-scale opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ control_scale.stdout }}"
    state: absent
  when: control_scale is succeeded

- name: register compute-scale opts
  shell: |
    grep -oP "compute-scale\ \d+" {{ overcloud_deploy_script }}
  register: compute_scale
  ignore_errors: true

- name: remove compute-scale opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ compute_scale.stdout }}"
    state: absent
  when: compute_scale is succeeded

- name: register ceph-storage-scale opts
  shell: |
    grep -oP "ceph-storage-scale\ \d+" {{ overcloud_deploy_script }}
  register: ceph_scale
  ignore_errors: true

- name: remove ceph-storage-scale opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ ceph_scale.stdout }}"
    state: absent
  when: ceph_scale is succeeded

- name: register control-flavor opts
  shell: |
    grep -oP "control-flavor\ .*\ " {{ overcloud_deploy_script }}
  register: control_flavor
  ignore_errors: true

- name: remove control-flavor opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ control_flavor.stdout }}"
    state: absent
  when: control_flavor is succeeded

- name: register compute-flavor opts
  shell: |
    grep -oP "compute-flavor\ .*\ " {{ overcloud_deploy_script }}
  register: compute_flavor
  ignore_errors: true

- name: remove compute-flavor opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ compute_flavor.stdout }}"
    state: absent
  when: compute_flavor is succeeded

- name: register ceph-flavor opts
  shell: |
    grep -oP "ceph-storage-flavor\ .*\ " {{ overcloud_deploy_script }}
  register: ceph_flavor
  ignore_errors: true

- name: remove ceph-flavor opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ ceph_flavor.stdout }}"
    state: absent
  when: ceph_flavor is succeeded

- name: register ntp-server opts
  shell: |
    grep -oP "ntp-server\ .*\ " {{ overcloud_deploy_script }}
  register: ntp_server
  ignore_errors: true

- name: remove ntp-server opts from deploy command
  lineinfile:
    path: "{{ overcloud_deploy_script }}"
    regexp: "{{ ntp_server.stdout }}"
    state: absent
  when: ntp_server is succeeded

# The template reads the per-option registers above by name; force: false
# keeps an existing conversion from being overwritten on re-runs.
- name: convert cli options into parameters
  template:
    src: fast-forward-upgrade/cli_opts_params.yaml.j2
    dest: "{{ working_dir }}/cli_opts_params.yaml"
    force: false
  when:
    - >
      control_scale is succeeded or
      compute_scale is succeeded or
      ceph_scale is succeeded or
      control_flavor is succeeded or
      compute_flavor is succeeded or
      ceph_flavor is succeeded or
      ntp_server is succeeded

- name: check "{{ working_dir }}/cli_opts_params.yaml" exists
  stat:
    path: "{{ working_dir }}/cli_opts_params.yaml"
  register: cli_opts_param_file

# Downstream tasks use this flag to add cli_opts_params.yaml to the deploy.
- name: set cli_converted_options fact
  set_fact:
    cli_converted_options: true
  when: cli_opts_param_file.stat.exists|bool

View File

@ -1,249 +0,0 @@
---
# One-time pre-Pike-upgrade adjustment of a custom roles_data file: injects
# the attributes (deprecated_param_*, networks, tags, description) and the
# new Pike services that the default Pike roles carry. The whole block is
# skipped when no custom roles file is referenced by the deploy script or
# when the backup marker (<file>.pre_pike_upgrade) already exists.
# NOTE(review): this listing collapsed runs of spaces, so the exact
# indentation embedded in the '\1\n ...' replacement strings below (which
# becomes YAML indentation in the rewritten roles file) must be confirmed
# against the original file.
- name: Register roles data file location if exists
  shell: "grep '\\-r\\ \\|\\-\\-roles' {{ overcloud_deploy_script }} | awk {'print $2'}"
  register: custom_roles_file
  ignore_errors: true

- name: Check if roles data has already been adjusted
  stat:
    path: "{{ custom_roles_file.stdout }}.pre_pike_upgrade"
  register: custom_roles_adjusted

- block:
    # The copy doubles as the idempotency marker checked above.
    - name: Make a copy of the custom roles data file
      copy:
        src: "{{ custom_roles_file.stdout }}"
        dest: "{{ custom_roles_file.stdout }}.pre_pike_upgrade"
        remote_src: true

    - name: Assigns deprecated params to Controller role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: "^(- name: Controller( # the 'primary' role goes first)?$)"
        replace: "{{ item }}"
      loop:
        - '\1\n deprecated_param_image: "controllerImage"'
        - '\1\n deprecated_param_flavor: "OvercloudControlFlavor"'
        - '\1\n deprecated_param_extraconfig: "controllerExtraConfig"'
        - '\1\n uses_deprecated_params: True'

    - name: Assigns network attributes to Controller role or custom controller roles
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Controller.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - External\n - InternalApi\n - Storage\n - StorageMgmt\n - Tenant'
        - '\1\n tags:\n - primary\n - controller'
        - '\1\n description: |\n Controller role that has all the controler services loaded and handles\n Database, Messaging and Network functions.'

    - name: Assigns deprecated params to Compute role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Compute$)'
        replace: "{{ item }}"
      loop:
        - '\1\n deprecated_server_resource_name: "NovaCompute"'
        - '\1\n deprecated_param_ips: "NovaComputeIPs"'
        - '\1\n deprecated_param_scheduler_hints: "NovaComputeSchedulerHints"'
        - '\1\n deprecated_param_metadata: "NovaComputeServerMetadata"'
        - '\1\n deprecated_param_extraconfig: "NovaComputeExtraConfig"'
        - '\1\n deprecated_param_image: "NovaImage"'
        - '\1\n uses_deprecated_params: True'

    - name: Assigns network attributes to Compute role or custom compute roles
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Compute.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi\n - Storage\n - Tenant'
        - '\1\n description: |\n Basic Compute Node role'

    - name: Assigns new attributes to AltCompute role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: AltCompute.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi\n - Storage\n - Tenant'
        - '\1\n description: |\n Basic Compute Node role'

    - name: Assigns new attributes to BlockStorage role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: BlockStorage.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi\n - Storage\n - StorageMgmt'
        - '\1\n description: |\n Cinder Block Storage node role'

    - name: Assigns deprecated params to ObjectStorage role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: ObjectStorage$)'
        replace: "{{ item }}"
      loop:
        - '\1\n deprecated_param_flavor: "OvercloudSwiftStorageFlavor"'
        - '\1\n deprecated_param_image: "SwiftStorageImage"'
        - '\1\n deprecated_param_ips: "SwiftStorageIPs"'
        - '\1\n deprecated_param_metadata: "SwiftStorageServerMetadata"'
        - '\1\n uses_deprecated_params: True'

    - name: Assigns network attributes to ObjectStorage role or custom object storage roles
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: ObjectStorage.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi\n - Storage\n - StorageMgmt'
        - '\1\n description: |\n Swift Object Storage node role'

    - name: Assigns new attributes to CephStorage role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: CephStorage.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - Storage\n - StorageMgmt'
        - '\1\n description: |\n Ceph OSD Storage node role'

    - name: Assigns new attributes to Database role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Database.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi'
        - '\1\n description: |\n Standalone database role with the database being managed via Pacemaker'

    - name: Assigns new attributes to Galera role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Galera.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi'
        - '\1\n description: |\n Standalone database role with the database being managed via Pacemaker'

    - name: Assigns new attributes to Networker role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Networker.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi\n - Tenant'
        - '\1\n description: |\n Standalone networking role to run Neutron agents'

    - name: Assigns new attributes to Messaging role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Messaging.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - InternalApi'
        - '\1\n description: |\n Standalone messaging role with RabbitMQ being managed via Pacemaker'

    - name: Assigns new attributes to Monitor role
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '^(- name: Monitor.*)'
        replace: "{{ item }}"
      loop:
        - '\1\n networks:\n - Storage'
        - '\1\n description: |\n Ceph Monitor role'

    # Services below anchor on a service every role (or the relevant roles)
    # already lists, appending the Pike-new service after it.
    - name: Add services common to all roles introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::Timezone)'
        replace: "{{ item }}"
      loop:
        - '\1\n - OS::TripleO::Services::CertmongerUser'
        - '\1\n - OS::TripleO::Services::Docker'
        - '\1\n - OS::TripleO::Services::Securetty'
        - '\1\n - OS::TripleO::Services::Tuned'
        - '\1\n - OS::TripleO::Services::ContainersLogrotateCrond'

    - name: Add CinderBackend services introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- .*CinderVolume)'
        replace: "{{ item }}"
      loop:
        - '\1\n - OS::TripleO::Services::CinderBackendVRTSHyperScale'
        - '\1\n - OS::TripleO::Services::CinderBackendDellEMCUnity'
        - '\1\n - OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI'

    - name: Add Clustercheck service introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::MySQL$)'
        replace: '\1\n - OS::TripleO::Services::Clustercheck'

    - name: Add ExternalSwiftProxy service introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::SwiftProxy)'
        replace: '\1\n - OS::TripleO::Services::ExternalSwiftProxy'

    # Here the loop varies the regexp instead of the replacement: Iscsid is
    # added after both CinderVolume and NovaCompute.
    - name: Add Iscsid service introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: "{{ item }}"
        replace: '\1\n - OS::TripleO::Services::Iscsid'
      loop:
        - '(- .*CinderVolume)'
        - '(- OS::TripleO::Services::NovaCompute)'

    - name: Add Neutron API services introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::NeutronApi)'
        replace: "{{ item }}"
      loop:
        - '\1\n - OS::TripleO::Services::NeutronBgpVpnApi'
        - '\1\n - OS::TripleO::Services::NeutronL2gwApi'

    - name: Add Neutron agents introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::NeutronL3Agent)'
        replace: "{{ item }}"
      loop:
        - '\1\n - OS::TripleO::Services::NeutronL2gwAgent'
        - '\1\n - OS::TripleO::Services::NeutronLbaasv2Agent'

    - name: Add Neutron agents introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- .*NeutronOvsAgent)'
        replace: "{{ item }}"
      loop:
        - '\1\n - OS::TripleO::Services::NeutronVppAgent'
        - '\1\n - OS::TripleO::Services::NeutronLinuxbridgeAgent'
        - '\1\n - OS::TripleO::Services::Vpp'

    - name: Add NovaMigrationTarget service introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::NovaCompute)'
        replace: '\1\n - OS::TripleO::Services::NovaMigrationTarget'

    - name: Add OVNController service introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::OVNDBs)'
        replace: '\1\n - OS::TripleO::Services::OVNController'

    - name: Add Manila backend services introduced in Pike
      replace:
        dest: '{{ custom_roles_file.stdout }}'
        regexp: '(- OS::TripleO::Services::ManilaShare)'
        replace: "{{ item }}"
      loop:
        - '\1\n - OS::TripleO::Services::ManilaBackendIsilon'
        - '\1\n - OS::TripleO::Services::ManilaBackendUnity'
        - '\1\n - OS::TripleO::Services::ManilaBackendVMAX'
        - '\1\n - OS::TripleO::Services::ManilaBackendVNX'
  when: custom_roles_file.stdout|length > 0 and not custom_roles_adjusted.stat.exists

View File

@ -1,9 +0,0 @@
---
# Render the FIP HTTP check start/stop scripts into the working directory.
- name: Create FIP HTTP check scripts
  template:
    src: "{{ item }}"
    # Strip everything after the first '.' -> "<name>.sh"
    dest: "{{ working_dir }}/{{ item.split('.') | first }}.sh"
    mode: "0775"  # quoted: an unquoted octal literal is parsed as an integer
  loop:
    - 'fip_http_check_start.sh.j2'
    - 'fip_http_check_stop.sh.j2'

View File

@ -1,26 +0,0 @@
---
# Render the L3 agent connectivity check helper scripts (start/wait/stop).
# Only runs when the connectivity check is enabled.
- block:
    # bc is required by the generated ping-loss calculation scripts.
    - name: Ensure bc package is installed in the system
      package:
        name: "bc"
        state: latest
      become: true
      become_user: root
    - name: create start l3 agent connectivity check script
      template:
        src: "l3_agent_start_ping.sh.j2"
        dest: "{{ l3_agent_connectivity_check_start_script }}"
        mode: "0775"  # quoted: avoid YAML octal-integer parsing
    - name: create start l3 agent connectivity wait script
      template:
        src: "l3_agent_wait_ping.sh.j2"
        dest: "{{ l3_agent_connectivity_check_wait_script }}"
        mode: "0775"
    - name: create stop l3 agent connectivity check script
      template:
        src: "l3_agent_stop_ping.sh.j2"
        dest: "{{ l3_agent_connectivity_check_stop_script }}"
        mode: "0775"
  when: l3_agent_connectivity_check|bool

View File

@ -1,14 +0,0 @@
---
# Render the L3 agent failover pre/post check scripts (only when enabled).
- block:
    - name: create l3 agent failover check pre script
      template:
        src: "l3_agent_failover_pre.sh.j2"
        dest: "{{ working_dir }}/l3_agent_failover_pre.sh"
        mode: "0775"  # quoted: avoid YAML octal-integer parsing
    - name: create l3 agent failover check post scripts
      template:
        src: "l3_agent_failover_post.sh.j2"
        dest: "{{ working_dir }}/l3_agent_failover_post.sh"
        mode: "0775"
  when: l3_agent_failover_check|bool

View File

@ -1,15 +0,0 @@
---
# Render the per-stage log collection playbook and its wrapper script.
- block:
    - name: create log playbook
      template:
        src: "collect_logs.yaml.j2"
        dest: "{{ log_playbook }}"
        mode: "0775"  # quoted: avoid YAML octal-integer parsing
    - name: create script to run log playbook
      template:
        src: "collect_logs.sh.j2"
        # One wrapper per upgrade stage, named after the current stage.
        dest: "{{ log_playbook_script }}-{{ log_current_stage }}.sh"
        mode: "0775"
  when: log_stages|bool

View File

@ -1,7 +0,0 @@
---
# Render the nova actions smoke-test script (only when the check is enabled).
- name: create post nova actions test
  template:
    src: "nova_actions_check.sh.j2"
    dest: "{{ working_dir }}/nova_actions_check.sh"
    mode: "0775"  # quoted: avoid YAML octal-integer parsing
  when: nova_actions_check|bool

View File

@ -1,18 +0,0 @@
---
# Render the workload-launch helper plus the L3 connectivity start/stop
# scripts used around controller upgrades.
- name: create workload launch script
  template:
    src: "workload_launch.sh.j2"
    dest: "{{ workload_launch_script }}"
    mode: "0775"  # quoted: avoid YAML octal-integer parsing
- name: create start l3 agent connectivity check scripts
  template:
    src: "l3_agent_start_ping.sh.j2"
    dest: "{{ l3_agent_connectivity_check_start_script }}"
    mode: "0775"
- name: create stop l3 agent connectivity check scripts
  template:
    src: "l3_agent_stop_ping.sh.j2"
    dest: "{{ l3_agent_connectivity_check_stop_script }}"
    mode: "0775"

View File

@ -1,14 +0,0 @@
---
# Wait until the ping target VM answers, then launch the long-running
# connectivity check detached in the background.
- name: l3 agent connectivity wait until vm is ready
  shell: |
    source {{ overcloud_rc }}
    {{ l3_agent_connectivity_check_wait_script }}
  # Cast explicitly: a non-empty string value would otherwise always be
  # truthy (matches the |bool usage in the sibling script-creation file).
  when: l3_agent_connectivity_check|bool
- name: start l3 agent connectivity check
  shell: |
    source {{ overcloud_rc }}
    {{ l3_agent_connectivity_check_start_script }}
  when: l3_agent_connectivity_check|bool
  # Fire-and-forget for up to ~6 hours; the stop script terminates it later.
  async: 21660
  poll: 0

View File

@ -1,6 +0,0 @@
---
# Stop the background connectivity check; the stop script receives the
# maximum tolerated packet-loss value for the current stage.
- name: stop l3 agent connectivity check
  shell: |
    source {{ overcloud_rc }}
    {{ l3_agent_connectivity_check_stop_script }} {{ current_stage_error|default(loss_threshold) }}
  # Cast explicitly: a non-empty string value would otherwise always be
  # truthy (matches the |bool usage in the sibling script-creation file).
  when: l3_agent_connectivity_check|bool

View File

@ -1,6 +0,0 @@
---
# Verify router state after the L3 agent failover scenario completed.
- name: run l3 agent failover post script
  when: l3_agent_failover_check|bool
  shell: |
    source {{ overcloud_rc }}
    {{ working_dir }}/l3_agent_failover_post.sh

View File

@ -1,6 +0,0 @@
---
# Record router state before triggering the L3 agent failover scenario.
- name: run l3 agent failover pre script
  when: l3_agent_failover_check|bool
  shell: |
    source {{ overcloud_rc }}
    {{ working_dir }}/l3_agent_failover_pre.sh

View File

@ -1,68 +0,0 @@
---
# Derive the overcloud role/host upgrade ordering from the tripleo ansible
# inventory. Produces:
#   oc_roles       - ordered role list (controller first, compute/ceph last)
#   oc_roles_hosts - mapping role -> ordered host list (pacemaker bootstrap
#                    node first for the controller role)
- name: load inventory file
  slurp:
    src: "{{ upgrade_validation_inventory }}"
  register: upgrade_tripleo_inventory
- name: set inventory facts
  set_fact:
    # to_inventory_* are project filter plugins that parse the inventory YAML.
    inventory_rolemap: "{{ upgrade_tripleo_inventory.content | b64decode | to_inventory_rolemap }}"
    inventory_hostmap: "{{ upgrade_tripleo_inventory.content | b64decode | to_inventory_hostmap }}"
    inventory_roles: "{{ upgrade_tripleo_inventory.content | b64decode | to_inventory_roles }}"
- name: store roles and register controller role name
  set_fact:
    oc_roles: "{{ oc_roles + inventory_roles }}"
    # regex_search yields None for non-matches; select('string') drops them.
    controller_role_name: "{{ inventory_roles | map('regex_search', '^[A-Za-z0-9]*[Cc]ontroller[A-Za-z0-9]*$') | select('string') | list | last | default('') }}"
- name: register compute role name
  set_fact:
    compute_role_name: "{{ inventory_roles | map('regex_search', '^[A-Za-z0-9]*[Cc]ompute[A-Za-z0-9]*$') | select('string') | list | last | default('') }}"
- name: register ceph storage role name
  set_fact:
    cephstorage_role_name: "{{ inventory_roles | map('regex_search', '^[A-Za-z0-9]*[Cc]eph[Ss]torage[A-Za-z0-9]*$') | select('string') | list | last | default('') }}"
- name: store sorted roles with controller first(default)
  set_fact:
    # controller first, then all remaining roles, then compute, then ceph.
    oc_roles: "{{ oc_roles|intersect([controller_role_name]) + oc_roles|difference([controller_role_name]+[compute_role_name]+[cephstorage_role_name]) \
               + oc_roles|intersect([compute_role_name]) + oc_roles|intersect([cephstorage_role_name]) }}"
  when: roles_upgrade_order == '__undefined__'
- name: store sorted roles with controller first(user-defined)
  set_fact:
    oc_roles: "{{ roles_upgrade_order.split(';') }}"
  when: roles_upgrade_order != '__undefined__'
# In order to get the upgrade working, in the Controller role we
# need to start always by the bootstrap node for Pacemaker.
- name: Retrieve the pacemaker bootstrap controller node.
  shell: "sudo hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name"
  become_user: "{{ (overcloud_ssh_user) | ternary(overcloud_ssh_user, 'tripleo-admin') }}"
  delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
  register: pcmkr_bootstrap_node_out
  when:
    - not update_cell|bool
    - not mock_environment|bool
    - controller_role_name|length > 0
- name: Create ordered Controller's host set (bootstrap node first)
  vars:
    pcmkr_bootstrap_node: "{{ (pcmkr_bootstrap_node_out|default(omit)).get('stdout', '') }}"
    controllers_from_inventory: "{{ (inventory_hostmap[controller_role_name] | default('')) | list }}"
  set_fact:
    # intersect+difference moves the bootstrap node to the front of the list.
    controllers_ordered: "{{ controllers_from_inventory|intersect([pcmkr_bootstrap_node]) + controllers_from_inventory|difference([pcmkr_bootstrap_node]) }}"
  when: not update_cell|bool
- name: create hosts per role fact
  set_fact:
    oc_roles_hosts: "{{ oc_roles_hosts | combine({ item : controllers_ordered if item == controller_role_name else inventory_hostmap[item]|list }) }}"
  loop: "{{ inventory_roles }}"
  when: not update_cell | bool
- name: create hosts per role fact for cells
  # Cell updates keep the raw inventory order (no bootstrap-node reordering).
  set_fact:
    oc_roles_hosts: "{{ oc_roles_hosts | combine({ item : inventory_hostmap[item]|list }) }}"
  loop: "{{ inventory_roles }}"
  when: update_cell | bool

View File

@ -1,8 +0,0 @@
---
# Exercise basic nova actions on the overcloud and capture the full log.
- name: run post nova actions test
  when: nova_actions_check|bool
  shell: |
    set -o pipefail
    source {{ overcloud_rc }}
    {{ working_dir }}/nova_actions_check.sh 2>&1 {{ timestamper_cmd }} | \
    tee {{ working_dir }}/nova_actions_check.log

View File

@ -1,19 +0,0 @@
---
# item comes from dict2items over install.overcloud.ffu.replace.env.files:
# item.key is the environment file name to look for in the deploy script,
# item.value is the local replacement file to copy over it.
- name: determine the name of the replaced environment file
  shell:
    cat {{ overcloud_deploy_script }} | grep '-e .\+{{ item.key }} ' | sed 's/.\+ \(.\+{{ item.key }}\) .*/\1/'
  register: ffu_replaced_env_file
- block:
    - name: save a copy of the replaced environment configuration file
      copy:
        remote_src: true
        src: "{{ ffu_replaced_env_file.stdout }}"
        dest: "{{ ffu_replaced_env_file.stdout }}.orig.yaml"
    - name: replace the environment configuration file with the new one
      copy:
        src: "{{ item.value }}"
        dest: "{{ ffu_replaced_env_file.stdout }}"
  # Only act when the deploy script actually references the file.
  when: ffu_replaced_env_file.stdout|length > 0

View File

@ -1,4 +0,0 @@
---
# Apply each configured environment-file replacement in turn.
- name: loop over the replacement environment configuration files
  loop: "{{ install.overcloud.ffu.replace.env.files | default({}) | dict2items }}"
  include_tasks: replace_environment_file.yaml

View File

@ -1,39 +0,0 @@
---
# Wire a SkipRhelEnforcement heat parameter file into undercloud.conf's
# custom_env_files option (appending when the option already has values).
- name: Check undercloud.conf exists
  stat:
    path: "{{ working_dir }}/undercloud.conf"
  register: under_conf_exist
- block:
    # Fetch the remote ini locally so the 'ini' lookup (which runs on the
    # control node) can read it.
    - name: "Retrieve remote ini file"
      fetch:
        src: "{{ working_dir }}/undercloud.conf"
        dest: '/tmp/'
        flat: true
    - name: Read current custom_env_files files
      set_fact:
        current_custom_env: "{{ lookup( 'ini', 'custom_env_files section=DEFAULT file=/tmp/undercloud.conf') }}"
    - name: Append skip_rhel_release.yaml to custom_env_files
      ini_file:
        path: "{{ working_dir }}/undercloud.conf"
        section: DEFAULT
        option: custom_env_files
        # union + unique keeps existing entries and avoids duplicates.
        value: "{{ current_custom_env.split(',')| union([working_dir+'/skip_rhel_release.yaml'])|map('trim')|unique|join(',')}}"
        backup: true
      when: current_custom_env | default([]) | length > 0
    - name: Insert custom_env_files
      ini_file:
        path: "{{ working_dir }}/undercloud.conf"
        section: DEFAULT
        option: custom_env_files
        value: "{{ working_dir }}/skip_rhel_release.yaml"
        backup: true
      when: current_custom_env | default([]) | length == 0
    - name: Create the heat parameter file for undercloud.
      vars:
        skip_rhel:
          parameter_defaults:
            SkipRhelEnforcement: true
      copy:
        content: "{{ skip_rhel | to_nice_yaml }}"
        dest: "{{ working_dir }}/skip_rhel_release.yaml"
  when: under_conf_exist.stat.exists

View File

@ -1,7 +0,0 @@
---
# Install an ssh client config that disables host key checking so upgrade
# automation is not blocked by changed host keys.
- name: Adjust ssh config to skip host key check
  copy:
    src: ssh_config
    dest: "~/.ssh/config"
    mode: "0600"  # quoted: an unquoted octal literal is parsed as an integer
  when: need_ssh_config|bool

View File

@ -1,5 +0,0 @@
---
# Run the stage-specific log-collection wrapper, appending to its log file.
- name: collect logs on the overcloud for the current stage
  when: log_stages|bool
  shell: |
    {{ log_playbook_script }}-{{ log_current_stage }}.sh &>> {{ log_playbook_script }}-{{ log_current_stage }}.log

View File

@ -1,135 +0,0 @@
---
# In-place RHEL 7 -> 8 operating system upgrade of the undercloud via leapp.
# Order matters: package cleanup, leapp setup, repo handling, upgrade run,
# SELinux relabel, reboot, optional unregister.
- name: Remove old RHEL7 packages
  # Remove all el7ost packages except those which could imply the removal
  # (direct or indirect) of the leapp and subscription-manager packages.
  # Entries after '--' are yum excludes (leading '-').
  shell: >-
    yum -y remove
    *el7ost*
    galera*
    haproxy*
    httpd
    mysql*
    pacemaker*
    xinetd
    python-jsonpointer
    qemu-kvm-common-rhev
    qemu-img-rhev
    rabbit*
    redis*
    python3*
    --
    -*openvswitch*
    -python-docker
    -python-PyMySQL
    -python-pysocks
    -python2-asn1crypto
    -python2-babel
    -python2-cffi
    -python2-cryptography
    -python2-dateutil
    -python2-idna
    -python2-ipaddress
    -python2-jinja2
    -python2-jsonpatch
    -python2-markupsafe
    -python2-pyOpenSSL
    -python2-requests
    -python2-six
    -python2-urllib3
    -python-httplib2
    -python-passlib
    -python2-netaddr
    -ceph-ansible
    -python2-chardet
- name: Install leapp
  package:
    name:
      - leapp
      - leapp-repository
    state: latest
  # Unsubscribed hosts cannot install from the subscription repos.
  when:
    - not leapp_unsubscribed|bool
- name: "Add packages into leapp's to_remove/to_install/to_keep files"
  lineinfile:
    path: "/etc/leapp/transaction/{{ item.file }}"
    line: "{{ item.package }}"
  loop:
    - file: 'to_remove'
      package: 'openvswitch2.11'
    - file: 'to_install'
      package: 'openvswitch2.13'
    - file: 'to_keep'
      package: 'ceph-ansible'
- name: Download required leapp files
  get_url:
    url: "https://gitlab.cee.redhat.com/leapp/oamg-rhel7-vagrant/raw/master/roles/init/files/leapp-data/{{ item }}"
    dest: "/etc/leapp/files/{{ item }}"
    validate_certs: false
    mode: '0700'
  loop:
    - 'pes-events.json'
    - 'device_driver_deprecation_data.json'
    - 'repomap.json'
    - 'unsupported_driver_names.json'
    - 'unsupported_pci_ids.json'
  register: get_url_result
  # Retry transient download failures up to 3 times.
  until: get_url_result.status_code == 200
  delay: 10
  retries: 3
- name: Check if rhos-release is installed
  # check_mode probe: changes nothing; rc/changed reveal current state.
  package:
    name: rhos-release
    state: present
  check_mode: true
  failed_when: false
  register: rhos_release_installed
- block:
    - name: Remove rhos-release repos
      command: rhos-release -x
    - name: Remove conflicting rhos-release package
      package:
        name: rhos-release
        state: absent
  # Only on subscribed hosts where the package is already installed
  # (rc == 0 and check_mode reported no pending change).
  when:
    - not leapp_unsubscribed|bool
    - rhos_release_installed.rc == 0
    - not rhos_release_installed.changed
- name: Get enablerepo line for leapp
  # Build the --enablerepo flags from the currently configured repos.
  shell: |
    yum repolist -q 2> /dev/null | grep -e rhos-16 -e 'rhel-8.*baseos' -e 'rhel-8.*appstream' -e 'rhel-8.*fdp' -e 'rhel-8.*av' -e 'ansible' | cut -d '/' -f1 | \
    awk '{print "--enablerepo " $0 " "}' | tr -d '\n'
  when: leapp_unsubscribed|bool
  register: leapp_unsubscribed_options
- name: Run leapp upgrade (download packages)
  shell: |
    set -o pipefail
    {% if leapp_unsubscribed|bool and (upgrade_workarounds|bool or ffu_upgrade_workarounds|bool) %} LEAPP_UNSUPPORTED=1 LEAPP_NO_RHSM=1 {% endif %}\
    {% if leapp_skip_release_check|bool %} LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE=1 {% endif %} sudo -E leapp upgrade \
    {% if leapp_unsubscribed|bool %} {{ leapp_unsubscribed_options.stdout }} {% endif %} --debug 2>&1 | \
    tee {{ working_dir }}/undercloud_leapp_upgrade.log
- name: Relabel SELinux for filesystem after reboot.
  file:
    path: /.autorelabel
    state: touch
# Reboot, and wait for 30 mins
- name: Reboot the undercloud
  reboot:
    reboot_timeout: 1800
- name: Unregister the node once the OS was upgraded if desired
  redhat_subscription:
    state: absent
  when:
    - leapp_unregister|bool
    - not leapp_unsubscribed|bool

View File

@ -1,23 +0,0 @@
---
# Prepare the undercloud for upgrade: make sure OVS runs and the tripleo
# client (plus ceph-ansible when present) is up to date.
- name: Ensure Openvswitch is enabled and running
  service:
    name: openvswitch
    enabled: true
    state: started
- name: Update needed packages prior the undercloud upgrade
  package:
    name:
      # Select the python3 client package name on python3-based hosts.
      - "{{ (ansible_python.version.major is version('3', '>=')) | ternary('python3-tripleoclient', 'python-tripleoclient') }}"
    state: latest
- name: get installed package fact
  package_facts:
    manager: "auto"
- name: Update Ceph Ansible prior upgrade
  package:
    name:
      - "ceph-ansible"
    state: latest
  # Only when ceph-ansible is already installed (per package_facts above).
  when: "'ceph-ansible' in ansible_facts.packages"

View File

@ -1,45 +0,0 @@
---
# Reboot the undercloud when the latest installed kernel or openvswitch
# differs from the one currently loaded, or when explicitly requested.
- name: register latest installed kernel version
  shell: |
    rpm -qa | grep ^kernel-[0-9] | sort | tail -1 | awk -F 'kernel-' {'print $2'}
  register: installed_kernel
- name: register loaded kernel
  command: uname -r
  register: loaded_kernel
- name: register installed openvswitch package version
  # Only compare major.minor.
  shell: |
    rpm --queryformat %{VERSION} -q openvswitch | awk -F "." '{print $1"."$2}'
  register: installed_ovs
- name: register loaded openvswitch version
  shell: |
    ovs-vsctl show | grep ovs_version | awk -F \" {'print $2'} | awk -F "." '{print $1"."$2}'
  become: true
  become_user: root
  register: loaded_ovs
- name: handle undercloud reboot
  block:
    # Reboot, and wait for 30 mins
    - name: Reboot the undercloud
      reboot:
        reboot_timeout: 1800
      become: true
      become_user: root
    - name: assert UC services started
      # Poll keystone until the control plane answers again.
      shell: |
        source {{ undercloud_rc }}
        timeout 10 openstack endpoint list
      ignore_errors: true
      retries: "{{ service_readiness_count|default(100)|int }}"
      delay: 3
  when:
    - not tripleo_ci
    - >
      undercloud_reboot or
      (installed_kernel.stdout != loaded_kernel.stdout) or
      (installed_ovs.stdout != loaded_ovs.stdout)
  tags: undercloud_reboot

View File

@ -1,53 +0,0 @@
---
# validation_group: validation group to execute
# skiplist_validations: comma separated string of the validations to be skipped in a group
# validation_allowed_groups: list of allowed groups to run
# validation_args: string containing extra arguments for the validation command. (defaults to empty string)
- block:
    - name: "Retrieve validations for group {{ validation_group }}"
      command: "openstack tripleo validator list --group {{ validation_group }} -f value -c ID"
      register: validations_in_group
    - name: Set fact validations_filtered which removes validations from skiplist
      set_fact:
        validations_filtered: "{{ validations_in_group.stdout_lines | difference( skiplist_validations.split(',') ) }}"
    - name: "Running validations {{ validations_filtered }}"
      register: validations_result
      shell: |
        set -o pipefail
        source {{ undercloud_rc }}
        openstack tripleo validator run --validation {{ validations_filtered | join(',') }} --inventory {{ upgrade_validation_inventory }} \
        {{ validations_extra_args }} 2>&1 {{ timestamper_cmd }} >> validation-{{ validation_group }}.log
      when:
        - "validations_filtered|length > 0"
      args:
        executable: /usr/bin/bash
    - name: Validation group not executed
      debug:
        msg: "Validation group {{ validation_group }} is skipped."
      when:
        - "validations_filtered|length == 0"
  rescue:
    # On any failure above: collect per-validation results, log the failed
    # ones, then fail the play explicitly with a pointer to the log file.
    - name: Retrieve the validations results
      command: openstack tripleo validator show history -f json -c UUID -c Status
      register: validation_history
    - name: Set fact validation_status with the validations results
      set_fact:
        validations_failed: "{{ validation_history.stdout|default('')|from_json|json_query(\"[?Status != 'PASSED'].UUID\") }}"
      when: validation_history is defined
    - name: Log all the validations failed
      shell: |
        openstack tripleo validator show run {{ item }} &>> validation-{{ validation_group }}-failed.log
      loop: "{{ validations_failed }}"
      when:
        - "validations_failed|length > 0"
    - name: Fail if some validation is not PASSED
      fail:
        msg: "Validation failed: check the log in validation-{{ validation_group }}-failed.log."
      when:
        - "validations_failed|length > 0"
    - name: Fail if the validations command didn't succeed
      fail:
        msg: "Validations failed: check the log in validation-{{ validation_group }}.log."
      when:
        - validations_result is not succeeded
  when: "validation_group in validation_allowed_groups"

View File

@ -1,39 +0,0 @@
---
# Invoked via with_together: `item` is a tuple of hostnames for one upgrade
# batch; None padding from zip_longest is dropped with reject('none').
# pcs_present/compute_present are set by the caller per batch type.
- set_fact:
    # Accumulate pacemaker hosts across batches (trailing comma trimmed below).
    pcs_upgrade_hosts: "{{ item | reject('none') | join(',') + ',' + pcs_upgrade_hosts }}"
  when: pcs_present
- name: create compute pre upgrade script for {{ item }}
  when:
    - compute_present | bool
    - (workload_launch | bool) or (workload_external | bool)
  vars:
    node_name: "{{ item | reject('none') | join(',') }}"
  template:
    src: "node_upgrade_pre.sh.j2"
    dest: "{{ node_name }}_upgrade_pre.sh"
    mode: 0775
    force: true
- name: create overcloud system upgrade script for {{ item }}
  vars:
    hosts: "{{ item | reject('none') | join(',') }}"
  template:
    src: "fast-forward-upgrade/overcloud_system_upgrade.sh.j2"
    dest: "{{ overcloud_system_upgrade_script_base }}-{{ hosts }}.sh"
    mode: 0775
    force: true
- name: create overcloud upgrade script for {{ item }}
  vars:
    # Pacemaker roles upgrade all hosts accumulated so far; other roles
    # just the current batch.
    hosts: "{{ pcs_upgrade_hosts | regex_replace(',$', '')
           if pcs_present else item | reject('none') | join(',') }}"
    # Data transfer / service stop only on the batch holding mysql/pacemaker.
    transfer_data: >
      {{ inventory_hostmap.mysql | intersect (item) | length > 0 }}
    stop_services: >
      {{ inventory_hostmap.pacemaker | intersect (item) | length > 0 }}
  template:
    src: "fast-forward-upgrade/overcloud_upgrade_run.sh.j2"
    dest: "{{ overcloud_upgrade_run_script_base }}-{{ hosts }}.sh"
    mode: 0775
    force: true

View File

@ -1,186 +0,0 @@
---
# Build every script/environment artifact required for the overcloud FFU:
# workaround scripts, per-host system-upgrade/upgrade-run scripts, prepare,
# converge and ceph upgrade scripts, plus the L3/nova/HTTP check helpers.
- name: create a compatible deployment scripts from oooq
  include_tasks: ../upgrade/use_oooq.yaml
  args:
    apply:
      tags: use_oooq
  tags: use_oooq
  when: use_oooq|bool
- name: make a copy of the initial overcloud deploy script
  copy:
    remote_src: true
    src: "{{ overcloud_deploy_script }}"
    dest: "{{ overcloud_deploy_script }}.orig.sh"
- name: Convert CLI options to parameters
  import_tasks: ../common/convert_cli_opts_params.yaml
- name: get auxiliary facts for upgrade
  import_tasks: ../common/auxilary-facts.yaml
- name: generate roles list from inventory file
  import_tasks: ../common/load_roles_from_inventory.yaml
- name: create ffu upgrade workaround scripts
  template:
    src: workarounds.sh.j2
    dest: "{{ working_dir }}/{{ item }}.sh"
    mode: 0775
    force: true
  loop:
    - 'pre_ffu_overcloud_upgrade_prepare_workarounds'
    - 'post_ffu_overcloud_upgrade_prepare_workarounds'
    - 'pre_ffu_overcloud_os_upgrade_workarounds'
    - 'post_ffu_overcloud_os_upgrade_workarounds'
    - 'pre_ffu_overcloud_upgrade_workarounds'
    - 'post_ffu_overcloud_upgrade_workarounds'
    - 'pre_ffu_overcloud_converge_workarounds'
    - 'post_ffu_overcloud_converge_workarounds'
    - 'pre_ffu_overcloud_ceph_workarounds'
    - 'post_ffu_overcloud_ceph_workarounds'
  when: ffu_upgrade_workarounds
- name: create the custom upgrade init commands
  template:
    src: "upgrade_init_command.yaml.j2"
    dest: "{{ upgrade_init_command_dest }}"
    mode: 0775
  when: upgrade_init_command is string
- name: create overcloud_prepare_containers.sh script
  template:
    src: overcloud_prepare_containers.sh.j2
    dest: "{{ working_dir }}/overcloud_prepare_containers.sh"
    mode: 0755
    force: true
- set_fact:
    pcs_upgrade_hosts: ""
- name: Upgrade without outage
  when: not fast_and_furious|bool
  block:
    # The below filter chain does the following:
    # - Get the list of roles NOT present in the nova_compute groups
    # - Get the list of hosts in each role into an array
    # - Sort the resulting list of hosts for each role
    # - Return a list of host lists. eg: [['ctrl-1', 'ctrl2', 'ctrl-3'], ['DB1']]
    # - OR if the role does not exist, it returns []
    - set_fact:
        oc_role_host_list_controlplane: >-
          {{ (oc_roles_hosts|dict2items |
          rejectattr('key', 'in', inventory_rolemap['nova_compute'] | default([]))) |
          map(attribute='value') | map('sort') | list }}
    # - Then with_together does a zip_longest to combine the list of lists,
    #   using None to fill the gaps.
    # - We take care of the possible [] value by replacing it with two empty lists.
    - name: create controlplane based overcloud system upgrade script for {{ item }}
      vars:
        pcs_present: true
        compute_present: false
      include_tasks: create-overcloud-ffu-hosts-scripts.yaml
      with_together: "{{ (oc_role_host_list_controlplane | length == 0) | ternary([[], []], oc_role_host_list_controlplane) }}"
    # Reset the accumulator before the compute batches.
    - set_fact:
        pcs_upgrade_hosts: ""
    # The below filter chain does the following:
    # - Get the list of roles present in the nova_compute group
    # - Get the list of hosts in each role into an array
    # - Sort the resulting list of hosts for each role
    # - Return a list of host lists
    # - OR if the role does not exist, it returns []
    - set_fact:
        oc_role_host_list_nova_compute: >-
          {{ (oc_roles_hosts|dict2items |
          selectattr('key', 'in', inventory_rolemap['nova_compute'] | default([]))) |
          map(attribute='value') | map('sort') | list }}
    # - Then with_together does a zip_longest to combine the list of lists,
    #   using None to fill the gaps.
    # - We take care of the possible [] value by replacing it with two empty lists.
    - name: create compute based overcloud system upgrade script for {{ item }}
      vars:
        pcs_present: false
        compute_present: true
      include_tasks: create-overcloud-ffu-hosts-scripts.yaml
      with_together: "{{ (oc_role_host_list_nova_compute | length == 0) | ternary([[], []], oc_role_host_list_nova_compute) }}"
- name: Upgrade with outage
  when: fast_and_furious|bool
  block:
    # - Then with_together does a zip_longest to combine the list of lists,
    #   using None to fill the gaps.
    # - We take care of the possible [] value by replacing it with two empty lists.
    - name: create whole overcloud system upgrade script
      vars:
        pcs_present: false
        compute_present: false
      include_tasks: create-overcloud-ffu-hosts-scripts.yaml
      with_together: "{{ (oc_roles_hosts|dict2items | default([])) | map(attribute='value') | map('sort') | flatten(1) + ['undercloud'] }}"
- name: build extra templates based on new options.
  import_tasks: ../common/build_extra_template.yaml
  when: config_heat_extra_yaml is defined
- name: create overcloud upgrade prepare script
  vars:
    old_img:
      - "{{ working_dir }}/{{ container_registry_file }}"
      - "/usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
      - "{{ network_environment_file | default('') }}"
  template:
    src: "overcloud_upgrade_prepare.sh.j2"
    dest: "{{ overcloud_upgrade_prepare_script }}"
    mode: 0775
    force: true
- name: create overcloud converge script
  vars:
    old_img:
      - "{{ working_dir }}/{{ container_registry_file }}"
      - "/usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
      - "{{ network_environment_file | default('') }}"
  template:
    src: "overcloud_upgrade_converge.sh.j2"
    dest: "{{ working_dir }}/overcloud_upgrade_converge.sh"
    mode: 0775
    force: true
- name: create ceph_host_limit file
  vars:
    # roles where the cephadm-admin and keyring is shared
    ceph_limit_roles:
      - Undercloud
      - ceph_osd
      - ceph_mon
  template:
    src: "ceph_host_limit.txt.j2"
    dest: "{{ working_dir }}/ceph_host_limit.txt"
    mode: 0764
- name: create ceph upgrade script
  template:
    src: "ceph-upgrade-run.sh.j2"
    dest: "{{ working_dir }}/ceph-upgrade-run.sh"
    mode: 0755
- name: import ssh_config_skip_host tasks
  import_tasks: ../common/ssh_config_skip_host.yml
- name: Create post upgrade scripts for controller nodes
  import_tasks: ../common/controller_post_scripts.yml
- name: import create_l3_agent_connectivity_check_script tasks
  import_tasks: ../common/create_l3_agent_connectivity_check_script.yml
- name: import create_l3_agent_failover_check_script tasks
  import_tasks: ../common/create_l3_agent_failover_check_script.yml
- name: create nova actions check script
  import_tasks: ../common/create_nova_actions_check_script.yml
- name: import create HTTP test scripts
  import_tasks: ../common/create_http_test_scripts.yml

View File

@ -1,30 +0,0 @@
---
# Build the undercloud-side FFU artifacts: RHEL-enforcement skip, the
# pre-upgrade validation script, the upgrade script and workaround scripts.
- name: Ensure we skip Rhel Enforcement
  include_tasks: ../common/skip_rhel_enforcement.yaml
  when: not ( enforce_rhel|bool )
- name: create pre-upgrade validation script for old version
  template:
    src: "pre-upgrade-osp13-validation.sh.j2"
    dest: "{{ working_dir }}/pre-upgrade-validation.sh"
    mode: "0775"  # quoted: an unquoted octal literal is parsed as an integer
    force: true
- name: create undercloud upgrade script
  template:
    src: "undercloud_upgrade.sh.j2"
    dest: "{{ working_dir }}/ffu_undercloud_upgrade.sh"
    mode: "0775"
    force: true
- name: create ffu upgrade workaround scripts
  template:
    src: workarounds.sh.j2
    dest: "{{ working_dir }}/{{ item }}.sh"
    mode: "0775"
    force: true
  loop:
    - 'pre_ffu_undercloud_os_upgrade_workarounds'
    - 'post_ffu_undercloud_os_upgrade_workarounds'
    - 'pre_ffu_undercloud_upgrade_workarounds'
    - 'post_ffu_undercloud_upgrade_workarounds'
  # Explicit boolean cast, consistent with the |bool usage elsewhere.
  when: ffu_upgrade_workarounds|bool

View File

@ -1,32 +0,0 @@
---
# Run the ceph upgrade step of the FFU; on failure dump heat stack
# diagnostics before failing the play explicitly.
- name: upgrade ceph post ffu
  shell: |
    source {{ undercloud_rc }}
    {{ working_dir }}/ffu_upgrade_ceph_script.sh 2>&1 {{ timestamper_cmd }} > \
    {{ working_dir }}/ffu_upgrade_ceph.log
  register: ffu_upgrade_ceph
  # Failure handled explicitly below, after diagnostics are collected.
  ignore_errors: true
- block:
    - name: print stack failures
      shell: |
        set -o pipefail
        source {{ undercloud_rc }}
        openstack stack failures list --long {{ overcloud_stack_name }} 2>&1 {{ timestamper_cmd }} | \
        tee {{ working_dir }}/ffu_upgrade_ceph_failed.log
    - name: print resource list
      shell: |
        set -o pipefail
        source {{ undercloud_rc }}
        openstack stack resource list --filter status=FAILED --nested-depth 5 {{ overcloud_stack_name }} \
        2>&1 {{ timestamper_cmd }} | tee {{ working_dir }}/ffu_upgrade_ceph_failed_resources.log
  # Stack diagnostics only apply to non-ephemeral heat deployments.
  when:
    - ffu_upgrade_ceph is failed
    - not ephemeral_heat|bool
- name: was the ffu update ceph successful.
  fail: msg="FFU upgrade ceph step failed... :("
  when:
    - ffu_upgrade_ceph is failed

View File

@ -1,32 +0,0 @@
---
- name: run ffu converge step
shell: |
source {{ undercloud_rc }}
{{ working_dir }}/ffu_upgrade_converge_script.sh 2>&1 {{ timestamper_cmd }} > \
{{ working_dir }}/ffu_upgrade_converge.log
register: ffu_converge
ignore_errors: true
- name: print stack failures
shell: |
set -o pipefail
source {{ undercloud_rc }}
openstack stack failures list --long {{ overcloud_stack_name }} 2>&1 {{ timestamper_cmd }} | \
tee {{ working_dir }}/ffu_upgrade_converge_failed.log
when:
- ffu_converge is failed
- not ephemeral_heat|bool
- name: print resource list
shell: |
set -o pipefail
source {{ undercloud_rc }}
openstack stack resource list --filter status=FAILED --nested-depth 5 {{ overcloud_stack_name }} \
2>&1 {{ timestamper_cmd }} | tee {{ working_dir }}/ffu_upgrade_converge_failed_resources.log
when:
- ffu_converge is failed
- not ephemeral_heat|bool
- name: was the ffu converge successful.
fail: msg="FFU converge step failed... :("
when: ffu_converge is failed

View File

@ -1,362 +0,0 @@
---
# Main task list driving the fast-forward upgrade (FFU): undercloud OS
# upgrade, undercloud upgrade, then overcloud preparation. (The task list
# continues beyond this excerpt.)
- name: create Undercloud ffu prepare scripts
  include_tasks: create-undercloud-ffu-scripts.yaml
  tags: create_ffu_prepare_scripts
  when: >
    ffu_noop|bool or
    ffu_undercloud_upgrade|bool or
    ffu_undercloud_os_upgrade|bool
- block:
    - name: apply pre FFU undercloud OS upgrade workarounds
      shell: |
        set -o pipefail
        ./pre_ffu_undercloud_os_upgrade_workarounds.sh 2>&1 {{ timestamper_cmd }} >> pre_ffu_undercloud_os_upgrade_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: ffu_upgrade_workarounds|bool
    # At this stage we are running the source OSP version (OSP13), which does not
    # support the new tripleo-validations framework.
    - name: run pre-upgrade validation before upgrading RHEL
      shell: |
        set -o pipefail
        {{ working_dir }}/pre-upgrade-validation.sh 2>&1 {{ timestamper_cmd }} >> validation-pre-upgrade.log
      when: run_validations|bool
      tags:
        - upgrades_validations
    - name: Check that all validations succeeded
      # check_mode + lineinfile: fails when the failure line is present in
      # the log (the task would report "changed" by removing it).
      lineinfile:
        path: "{{ working_dir }}/validation-pre-upgrade.log"
        regexp: "^.*Failure! The validation failed for all hosts:.*$"
        state: absent
      check_mode: true
      register: validation_results
      failed_when: validation_results is changed
      tags:
        - upgrades_validations
      when: run_validations|bool
    - name: Clean up httpd folder
      become: true
      become_user: root
      file:
        path: /etc/httpd
        state: absent
    - name: Upgrade operating system
      become: true
      become_user: root
      import_tasks: ../common/undercloud_os_upgrade.yaml
      when: not use_oooq|bool
    - name: apply post FFU undercloud OS upgrade workarounds
      shell: |
        set -o pipefail
        ./post_ffu_undercloud_os_upgrade_workarounds.sh 2>&1 {{ timestamper_cmd }} >> post_ffu_undercloud_os_upgrade_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: ffu_upgrade_workarounds|bool
  when: ffu_undercloud_os_upgrade|bool
  tags: ffu_undercloud_os_upgrade
- block:
    - name: apply pre ffu undercloud upgrade workarounds
      shell: |
        set -o pipefail
        ./pre_ffu_undercloud_upgrade_workarounds.sh 2>&1 {{ timestamper_cmd }} >> pre_ffu_undercloud_upgrade_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: ffu_upgrade_workarounds|bool
    - name: configure container specific parameters
      import_tasks: ../common/configure_uc_containers.yml
    - name: install/update required packages before upgrading the undercloud
      become: true
      become_user: root
      import_tasks: ../common/undercloud_prerequisites.yaml
    - name: ffu undercloud upgrade
      shell: |
        set -o pipefail
        ./ffu_undercloud_upgrade.sh 2>&1 {{ timestamper_cmd }} >> ffu_undercloud_upgrade.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
    - name: apply post ffu undercloud upgrade workarounds
      shell: |
        set -o pipefail
        ./post_ffu_undercloud_upgrade_workarounds.sh 2>&1 {{ timestamper_cmd }} >> post_ffu_undercloud_upgrade_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: ffu_upgrade_workarounds|bool
  when: ffu_undercloud_upgrade|bool
- name: create overcloud FFU prepare scripts
  include_tasks: create-overcloud-ffu-scripts.yaml
  args:
    apply:
      tags:
        - create_ffu_prepare_scripts
  tags: create_ffu_prepare_scripts
  when: >
    ffu_noop|bool or
    ffu_overcloud_upgrade|bool
- block:
- name: Ensure ansible-pacemaker module is present in CI.
package:
name: ansible-pacemaker
state: latest
when: tripleo_ci|default(false)|bool
tags: ffu_upgrade_playbook
become: true
- name: launch workload
shell: |
set -o pipefail
{{ workload_launch_script }} 2>&1 {{ timestamper_cmd }} >> workload_launch.log
args:
chdir: "{{ working_dir }}"
executable: /usr/bin/bash
when: workload_launch|bool
- name: start L3 verification scripts
include_tasks: "{{ item }}"
args:
apply:
tags: ffu_overcloud_prepare
loop:
- '../common/l3_agent_connectivity_check_start_script.yml'
- '../common/l3_agent_failover_check_pre_script.yml'
tags: ffu_overcloud_prepare
# At this stage, the Undercloud is in targe version and the overcloud
# in source version. Therefore we can use the tripleo-validation new
# framework, but it is required to pass the python-interpreter
- name: run pre-upgrade validation for the overcloud nodes
import_tasks: ../common/validation_group_run.yaml
vars:
validation_group: "pre-upgrade"
validation_allowed_groups: "{{ upgrades_validations_groups }}"
when: run_validations|bool
tags:
- overcloud_upgrade_prepare
- upgrades_validations
- name: replace environment files
import_tasks: ../common/replace_environment_files.yaml
tags: ffu_overcloud_prepare
- name: adjust role-data for upgrade
include_tasks: ../common/adjust-roles-data.yaml
when: custom_roles_used|bool
tags: ffu_overcloud_prepare
- name: check customized {{ uc_containers_prepare_file }} exists
stat:
path: "{{ working_dir }}/{{ uc_containers_prepare_file }}"
register: custom_uc_containers
tags: ffu_overcloud_prepare
- name: Set the neutron_driver to ovn for the Overcloud upgrade if deployed with OVN
replace:
path: "{{ working_dir }}/{{ uc_containers_prepare_file }}"
regexp: '^(\s*neutron_driver\s*:)(.*)'
replace: '\1 ovn'
when:
- custom_uc_containers.stat.exists
- deployment_with_ovn
tags: ffu_overcloud_prepare
# Optional site/release-specific workaround scripts bracket the overcloud
# upgrade prepare step; both are generated earlier from workarounds.sh.j2.
- name: apply pre ffu overcloud prepare workarounds
  shell: |
    set -o pipefail
    ./pre_ffu_overcloud_upgrade_prepare_workarounds.sh 2>&1 {{ timestamper_cmd }} >> pre_ffu_overcloud_upgrade_prepare_workarounds.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_prepare
- name: run overcloud upgrade prepare
  import_tasks: ../upgrade/overcloud_upgrade_prepare.yml
  tags: ffu_overcloud_prepare
- name: apply post ffu overcloud prepare workarounds
  shell: |
    set -o pipefail
    ./post_ffu_overcloud_upgrade_prepare_workarounds.sh 2>&1 {{ timestamper_cmd }} >> post_ffu_overcloud_upgrade_prepare_workarounds.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_prepare
# Pull/prepare the target-version container images for the overcloud.
# Fix: capture stderr too (2>&1) before piping through the timestamper,
# consistent with every other logged shell step in this role; without it
# error output was lost from overcloud_upgrade_prepare_containers.log.
- name: prepare containers for overcloud upgrade
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  shell: |
    set -o pipefail
    ./overcloud_prepare_containers.sh 2>&1 {{ timestamper_cmd }} >> overcloud_upgrade_prepare_containers.log
  tags: ffu_overcloud_prepare
# Stop the prepare-stage L3 checkers and evaluate failover results.
- name: stop L3 verification scripts
  include_tasks: "{{ item }}"
  args:
    apply:
      tags: ffu_overcloud_prepare
  loop:
    - '../common/l3_agent_connectivity_check_stop_script.yml'
    - '../common/l3_agent_failover_check_post_script.yml'
  tags: ffu_overcloud_prepare
# This l3 check code is for now commented out for two reasons:
# 1) There is separate ping test in overcloud upgrade run which
#    will be running at the same time, but the cleanup script runs
#    kill -s INT $(pidof ping) which will kill them both and then
#    we will fail on the final cleanup.
# 2) The workload creation and cleanup scripts work in similar
#    way. The workload cleanup does not check which workload we
#    are cleaning specifically but it just removes first one on
#    the list which in our case will be the host we are pinging
#    here.
# - name: start L3 verification scripts
#   include_tasks: "{{ item }}"
#   args:
#     apply:
#       tags: ffu_overcloud_run
#   loop:
#     - '../common/l3_agent_connectivity_check_start_script.yml'
#     - '../common/l3_agent_failover_check_pre_script.yml'
#   tags: ffu_overcloud_run
############## OS upgrade + Overcloud node upgrade run ##############
# Mutually exclusive upgrade-run strategies, selected by fast_and_furious:
# everything at once (full outage) ...
- name: Start overcloud upgrade run with outage for all roles.
  import_tasks: overcloud_upgrade_fast_and_furious.yaml
  tags: ffu_overcloud_run
  when: fast_and_furious|bool
############## OS upgrade + Overcloud node upgrade run ##############
# ... or role by role (controllers first, then computes).
- name: Start overcloud upgrade run for all roles.
  import_tasks: overcloud_upgrade_roles.yaml
  tags: ffu_overcloud_run
  when: not fast_and_furious|bool
# - name: stop L3 verification scripts
#   include_tasks: "{{ item }}"
#   args:
#     apply:
#       tags: ffu_overcloud_run
#   loop:
#     - '../common/l3_agent_connectivity_check_stop_script.yml'
#     - '../common/l3_agent_failover_check_post_script.yml'
#   tags: ffu_overcloud_run
################ UPGRADE CONVERGE ###############
# Converge re-applies the deployment to assert the final upgraded state;
# workaround scripts optionally bracket it.
- name: apply pre ffu overcloud converge workarounds
  shell: |
    set -o pipefail
    ./pre_ffu_overcloud_converge_workarounds.sh 2>&1 {{ timestamper_cmd }} >> pre_ffu_overcloud_converge_workarounds.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_converge
- name: import overcloud upgrade converge tasks
  import_tasks: ../upgrade/overcloud_upgrade_converge.yml
  tags: ffu_overcloud_converge
- name: apply post ffu overcloud converge workarounds
  shell: |
    set -o pipefail
    ./post_ffu_overcloud_converge_workarounds.sh 2>&1 {{ timestamper_cmd }} >> post_ffu_overcloud_converge_workarounds.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_converge
############## CEPH upgrade ######################
# Ceph is upgraded after converge, and only on deployments that carry
# Ceph OSDs (ceph_osd_enabled).
- when: ceph_osd_enabled|bool
  block:
    - name: apply pre ffu ceph upgrade workarounds
      shell: |
        set -o pipefail
        ./pre_ffu_overcloud_ceph_workarounds.sh 2>&1 {{ timestamper_cmd }} >> pre_ffu_overcloud_ceph_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: ffu_upgrade_workarounds|bool
      tags: ffu_overcloud_ceph
    - name: import ceph upgrade tasks
      import_tasks: ../upgrade/ceph_upgrade_run.yml
      tags: ffu_overcloud_ceph
    # This l3 check code is for now commented out for two reasons:
    # 1) There is separate ping test in overcloud upgrade run which
    #    will be running at the same time, but the cleanup script runs
    #    kill -s INT $(pidof ping) which will kill them both and then
    #    we will fail on the final cleanup.
    # 2) The workload creation and cleanup scripts work in similar
    #    way. The workload cleanup does not check which workload we
    #    are cleaning specifically but it just removes first one on
    #    the list which in our case will be the host we are pinging
    #    here.
    #
    # - name: start L3 verification scripts
    #   include_tasks: "{{ item }}"
    #   args:
    #     apply:
    #       tags: ffu_overcloud_ceph
    #   loop:
    #     - '../common/l3_agent_connectivity_check_start_script.yml'
    #     - '../common/l3_agent_failover_check_pre_script.yml'
    #   tags: ffu_overcloud_ceph
    - name: apply post ffu ceph upgrade workarounds
      shell: |
        set -o pipefail
        ./post_ffu_overcloud_ceph_workarounds.sh 2>&1 {{ timestamper_cmd }} >> post_ffu_overcloud_ceph_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: ffu_upgrade_workarounds|bool
      tags: ffu_overcloud_ceph
    # - name: stop L3 verification scripts
    #   include_tasks: "{{ item }}"
    #   args:
    #     apply:
    #       tags: ffu_overcloud_ceph
    #   loop:
    #     - '../common/l3_agent_connectivity_check_stop_script.yml'
    #     - '../common/l3_agent_failover_check_post_script.yml'
    #   tags: ffu_overcloud_ceph
############## POST UPGRADE ##############
# Optional controller cleanup steps plus a nova sanity check (instance
# actions) once the whole fast-forward upgrade is done.
- name: run controller post upgrade steps
  include_tasks: ../upgrade/controller_post_upgrade.yml
  args:
    apply:
      tags:
        - ffu_overcloud_post
  tags: ffu_overcloud_post
  when: controller_upgrade_post|bool
- name: run post upgrade nova actions
  import_tasks: ../common/nova_actions_check.yml
  tags: ffu_overcloud_post
  when: ffu_overcloud_upgrade|bool

View File

@ -1,21 +0,0 @@
---
# Fast-and-furious strategy: push the entire overcloud (all roles at once)
# through the upgrade, accepting a full outage.
- set_fact:
    pcs_host: ""
# The transfer flag tells the mysql data transfer machinery that
# /var/lib/mysql must be carried over; it is created on the first
# (bootstrap) node of the mysql role.
- name: Create /var/lib/tripleo/transfer-flags/var-lib-mysql in bootstrap node
  become: true
  become_user: "{{ (overcloud_ssh_user) | ternary(overcloud_ssh_user, 'heat-admin') }}"
  delegate_to: "{{ ((oc_roles_hosts|dict2items |
                 selectattr('key', 'in', inventory_rolemap['mysql'] | default([]))) |
                 map(attribute='value') | map('sort') | list | flatten(1))[0] }}"
  shell: "sudo mkdir -p /var/lib/tripleo/transfer-flags && sudo touch /var/lib/tripleo/transfer-flags/var-lib-mysql"
# Build one flat, sorted host list covering every role, then drive
# overcloud_upgrade_hosts.yaml over it via with_together (zip_longest).
- name: upgrade the whole overcloud
  vars:
    host: "{{ item | reject('none') | join(',') }}"
    pcs_present: false
    compute_present: false
    oc_role_host_list: >-
      {{ (oc_roles_hosts|dict2items | default([])) |
      map(attribute='value') | map('sort') | flatten(1) }}
  include_tasks: overcloud_upgrade_hosts.yaml
  # NOTE(review): appending the bare string 'undercloud' to a list of host
  # lists looks suspicious for with_together (zip_longest iterates strings
  # character by character) -- confirm this is the intended way to add a
  # final 'undercloud' pass rather than appending ['undercloud'] as a list.
  with_together: "{{ (oc_role_host_list | length == 0) | ternary([[], []], oc_role_host_list) + ['undercloud'] }}"

View File

@ -1,116 +0,0 @@
---
# Per-host(s) upgrade sequence: RHEL system upgrade (leapp) first, then the
# tripleo composable upgrade run. `host` is a comma-separated host list set
# by the caller; pcs_present / compute_present flag pacemaker and compute
# behaviour respectively.
# Fix: all four `when: ffu_upgrade_workarounds` conditions now use the
# explicit `|bool` cast, matching the convention used everywhere else in
# this role; unnamed set_fact tasks were given names for readable logs.
######## RHEL system upgrade ########
- name: apply pre ffu overcloud system upgrade workarounds in {{ host }}
  shell: |
    set -o pipefail
    ./pre_ffu_overcloud_os_upgrade_workarounds.sh \
    --limit {{ host }} 2>&1 {{ timestamper_cmd }} >> pre_ffu_overcloud_os_upgrade_workarounds_{{ host }}.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_system_upgrade
# Take the compute out of the scheduler before rebooting it into the new OS.
- name: Disable scheduling on {{ host }}
  when:
    - compute_present|bool
  shell: |
    set -o pipefail
    source {{ overcloud_rc }}
    HOST="$(openstack compute service list -f value -c Host | grep {{ host }})"
    openstack compute service set --disable --disable-reason FFWD "$HOST" nova-compute
# Per-compute pre-upgrade script (e.g. live-migrate workload away) only
# makes sense when a workload exists.
- name: run the pre upgrade script for the host {{ host }}
  when:
    - compute_present | bool
    - (workload_launch | bool) or (workload_external | bool)
  shell: |
    set -o pipefail
    ./{{ host }}_upgrade_pre.sh 2>&1 {{ timestamper_cmd }} >> {{ host }}_upgrade_pre.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  tags: ffu_overcloud_system_upgrade
- name: run the RHEL upgrade steps for the host {{ host }}
  shell: |
    set -o pipefail
    {{ overcloud_system_upgrade_script_base }}-{{ host }}.sh 2>&1 {{ timestamper_cmd }} >> \
    {{ overcloud_system_upgrade_script_base }}-{{ host }}.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  register: overcloud_system_upgrade_res
  ignore_errors: true
  tags: ffu_overcloud_system_upgrade
# Fail explicitly (the shell task above ignores errors so its log survives).
- name: was the RHEL upgrade step successful.
  fail: msg="Overcloud Operating System upgrade failed in {{ host }}."
  when: overcloud_system_upgrade_res.rc != 0
  tags: ffu_overcloud_system_upgrade
- name: apply post ffu overcloud system upgrade workarounds in {{ host }}
  shell: |
    set -o pipefail
    ./post_ffu_overcloud_os_upgrade_workarounds.sh --limit {{ host }} 2>&1 {{ timestamper_cmd }} >> \
    post_ffu_overcloud_os_upgrade_workarounds_{{ host }}.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_system_upgrade
######## overcloud upgrade run ########
- name: Enable scheduling on {{ host }}
  when:
    - compute_present|bool
  shell: |
    set -o pipefail
    source {{ overcloud_rc }}
    HOST="$(openstack compute service list -f value -c Host | grep {{ host }})"
    openstack compute service set --enable "$HOST" nova-compute
- name: apply pre ffu overcloud upgrade workarounds in {{ host }}
  shell: |
    set -o pipefail
    ./pre_ffu_overcloud_upgrade_workarounds.sh --limit {{ host }} 2>&1 {{ timestamper_cmd }} >> \
    pre_ffu_overcloud_upgrade_workarounds_{{ host }}.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_upgrade_run
# HACK for ffwd upgrade: accumulate the already-upgraded pacemaker hosts so
# the upgrade-run script is invoked with the growing comma-separated list.
- name: append current host to the accumulated pacemaker host list
  set_fact:
    upgrade_host: "{{ host + ',' + pcs_host }}"
- name: strip the trailing comma from the accumulated host list
  set_fact:
    pcs_host: "{{ upgrade_host | regex_replace(',$', '') }}"
- name: run overcloud major upgrade for the host {{ pcs_host if pcs_present else host }}
  shell: |
    set -o pipefail
    {{ overcloud_upgrade_run_script_base }}-{{ pcs_host if pcs_present else host }}.sh 2>&1 {{ timestamper_cmd }} >> \
    {{ overcloud_upgrade_run_script_base }}-{{ pcs_host if pcs_present else host }}.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  register: overcloud_upgrade_res
  ignore_errors: true
  tags: ffu_overcloud_upgrade_run
- name: was the overcloud upgrade composable step successful.
  fail: msg="Overcloud upgrade composable step failed for {{ host }}... :("
  when: overcloud_upgrade_res.rc != 0
  tags: ffu_overcloud_upgrade_run
- name: apply post ffu overcloud upgrade workarounds in {{ host }}
  shell: |
    set -o pipefail
    ./post_ffu_overcloud_upgrade_workarounds.sh --limit {{ host }} 2>&1 {{ timestamper_cmd }} >> \
    post_ffu_overcloud_upgrade_workarounds_{{ host }}.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: ffu_upgrade_workarounds|bool
  tags: ffu_overcloud_upgrade_run

View File

@ -1,49 +0,0 @@
---
# Role-by-role strategy: upgrade the control plane roles first (with
# pacemaker handling), then the compute roles.
- set_fact:
    pcs_host: ""
# The below filter chain does the following:
# - Get the list of roles NOT present in the nova_compute groups
# - Get the list of hosts in each role into an array
# - Sort the resulting list of hosts for each role
# - Return a list of host lists. eg: [['ctrl-1', 'ctrl2', 'ctrl-3'], ['DB1']]
# - OR if the role does not exist, it returns []
- set_fact:
    oc_role_host_list_controlplane: >-
      {{ (oc_roles_hosts|dict2items |
      rejectattr('key', 'in', inventory_rolemap['nova_compute'] | default([]))) |
      map(attribute='value') | map('sort') | list }}
# - Then with_together does a zip_longest to combine the list of lists,
#   using None to fill the gaps. eg: [ctrl-1, DB1, ctrl2, None, ctrl-3, None]
# - We take care of the possible [] value by replacing it with two empty lists.
- name: upgrade controlplane {{ item }}
  vars:
    host: "{{ item | reject('none') | join(',') }}"
    pcs_present: true
    compute_present: false
  include_tasks: overcloud_upgrade_hosts.yaml
  with_together: "{{ (oc_role_host_list_controlplane | length == 0) | ternary([[], []], oc_role_host_list_controlplane) }}"
# The below filter chain does the following:
# - Get the list of roles present in the nova_compute group
# - Get the list of hosts in each role into an array
# - Sort the resulting list of hosts for each role
# - Return a list of host lists
# - OR if the role does not exist, it returns []
- set_fact:
    oc_role_host_list_nova_compute: >-
      {{ (oc_roles_hosts|dict2items |
      selectattr('key', 'in', inventory_rolemap['nova_compute'] | default([]))) |
      map(attribute='value') | map('sort') | list }}
# - Then with_together does a zip_longest to combine the list of lists,
#   using None to fill the gaps.
# - We take care of the possible [] value by replacing it with two empty lists.
- name: upgrade computes {{ item }}
  vars:
    host: "{{ item | reject('none') | join(',') }}"
    pcs_present: false
    compute_present: true
  include_tasks: overcloud_upgrade_hosts.yaml
  with_together: "{{ (oc_role_host_list_nova_compute | length == 0) | ternary([[], []], oc_role_host_list_nova_compute) }}"

View File

@ -1,63 +0,0 @@
---
# tasks file for tripleo-upgrade: entry point that dispatches to the
# major-upgrade, minor-update or fast-forward-upgrade flows, bracketed by
# optional workload launch/cleanup.
- name: prepare workload scripts
  include_tasks:
    file: common/create_workload.yml
    apply:
      tags: always
  when: workload_launch|bool or launch_sanity_workload|bool
  tags: always
# state: latest is intentional here (ANSIBLE0010 is skip-listed in
# .ansible-lint); these tools are needed by the workaround scripts.
- name: install packages required to apply workaround
  become: true
  become_user: root
  package:
    name:
      - patchutils
      - patch
      - curl
    state: latest
  tags: always
- name: launch workload
  shell: |
    set -o pipefail
    {{ workload_launch_script }} 2>&1 {{ timestamper_cmd }} >> workload_launch.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: workload_launch|bool
  tags: always
# Exactly one (or none) of the three flows below runs, selected by the
# *_noop / *_upgrade / *_update boolean extra-vars.
- include_tasks: upgrade/main.yml
  when: >
    upgrade_noop|bool or
    undercloud_upgrade|bool or
    undercloud_os_upgrade|bool or
    overcloud_upgrade|bool
  tags: always
- include_tasks: update/main.yml
  when: >
    update_noop|bool or
    undercloud_update|bool or
    overcloud_update|bool
  tags: always
- include_tasks: fast-forward-upgrade/main.yml
  when: >
    ffu_noop|bool or
    ffu_undercloud_os_upgrade|bool or
    ffu_undercloud_upgrade|bool or
    ffu_overcloud_upgrade|bool
  tags: always
- name: workload cleanup
  shell: |
    set -o pipefail
    {{ workload_launch_script }} cleanup 2>&1 {{ timestamper_cmd }} >> workload_cleanup.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  when: workload_cleanup|bool

View File

@ -1,106 +0,0 @@
---
# Minor update of the Ceph cluster via cephadm, driven from the first
# controller. Skips cleanly when the orchestrator reports no update needed.
- name: Load variables from container environment file
  slurp:
    src: "{{ working_dir }}/{{ uc_containers_prepare_file }}"
  register: container_env
- name: Set required ceph image facts from container environment file
  set_fact:
    ceph_image_tag: "{{ (container_env.content|b64decode|from_yaml).parameter_defaults.ContainerImagePrepare[0].set.ceph_tag }}"
    ceph_image: "{{ (container_env.content|b64decode|from_yaml).parameter_defaults.ContainerImagePrepare[0].set.ceph_image }}"
    ceph_namespace: "{{ (container_env.content|b64decode|from_yaml).parameter_defaults.ContainerImagePrepare[0].set.ceph_namespace.split('/')[1] }}"
    container_registry: "{{ undercloud_short_host_name }}.{{ docker_registry_network }}.{{ undercloud_domain }}:8787"
# NOTE(review): this pipeline has no pipefail, so a cephadm failure would
# surface as empty jq output rather than a task failure -- confirm intended.
- name: Get Ceph cluster health status
  shell: cephadm shell ceph -s -f json | jq .health.status -r
  register: ceph_status
  become: true
  become_user: root
  delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
# "{}" from `ceph orch upgrade check` means everything is already at the
# target image, i.e. no update is required.
- name: Check if ceph update is required
  shell: cephadm shell ceph orch upgrade check {{ image }} | jq .needs_update -r
  vars:
    image: "{{ container_registry }}/{{ ceph_namespace }}/{{ ceph_image }}:{{ ceph_image_tag }}"
  register: ceph_needs_update
  become: true
  become_user: root
  delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
- name: Fail before ceph update when ceph cluster is unhealthy
  fail:
    msg: Ceph update failed because ceph cluster is unhealthy
  when:
    - ceph_status.stdout != "HEALTH_OK"
    - ceph_needs_update.stdout != "{}"
# When no ceph update happens, ceph containers keep their old tags, so they
# must be excluded from the post-update container version check.
- name: Exclude ceph images from container check when ceph update not required
  lineinfile:
    path: "{{ log_playbook_script }}-before_reboot.sh"
    regexp: "^EXCLUDED_CONTAINERS_FROM_CHECK="
    line: "EXCLUDED_CONTAINERS_FROM_CHECK=${2:-{{ excluded_containers_from_check }},ceph}"
    state: present
    backup: true
  when: ceph_needs_update.stdout == "{}"
- block:
    - name: import tasks from l3_agent_connectivity_check_start_script
      import_tasks: ../common/l3_agent_connectivity_check_start_script.yml
    - name: Start Ceph update using cephadm
      command:
        cmd: >
          cephadm shell --
          ceph orch upgrade start --image {{ image }}
      vars:
        image: "{{ container_registry }}/{{ ceph_namespace }}/{{ ceph_image }}:{{ ceph_image_tag }}"
      become: true
      become_user: root
      delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
    # Poll the orchestrator once a minute until it stops reporting an
    # in-progress upgrade (up to ceph_update_timeout minutes).
    - name: Wait while ceph update is in progress
      shell: |
        set -o pipefail
        cephadm shell -- ceph orch upgrade status | jq .in_progress -r
      changed_when: false
      register: ceph_upgrade_progress
      retries: "{{ ceph_update_timeout }}"
      delay: 60
      until:
        - ceph_upgrade_progress.stdout == "false"
      become: true
      become_user: root
      delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
    # "not in progress" can also mean aborted, so confirm completion from
    # the cephadm log.
    - name: Check update status
      shell: |
        set -o pipefail
        cephadm shell -- ceph log last cephadm | grep 'Upgrade: Complete!'
      become: true
      become_user: root
      delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
      register: ceph_update_status
    - name: Fail Ceph update
      fail:
        msg: Ceph minor update failed
      when: ceph_update_status.rc != 0
  when: ceph_needs_update.stdout != "{}"
  rescue:
    - name: print Ceph update failures
      command:
        cmd: >
          cephadm shell --
          ceph log last cephadm
      become: true
      become_user: root
      delegate_to: "{{ inventory_hostmap[controller_role_name]|first }}"
    - name: Fail Ceph update
      fail:
        msg: Ceph minor update failed
  always:
    - name: import tasks from l3_agent_connectivity_check_stop_script
      import_tasks: ../common/l3_agent_connectivity_check_stop_script.yml
      vars:
        current_stage_error: "{{ update_loss_threshold }}"

View File

@ -1,131 +0,0 @@
---
# Generate every script needed by the minor-update flow (undercloud update,
# overcloud prepare/run, workarounds, checks) into working_dir.
# Fix: file modes are quoted strings ("0775"/"0755") -- unquoted 0775 is an
# octal integer in YAML 1.1 (decimal 509), which Ansible's mode parameter
# may misinterpret; quoting is the documented convention.
- name: Ensure we skip Rhel Enforcement
  include_tasks: ../common/skip_rhel_enforcement.yaml
  when: not ( enforce_rhel|bool )
- name: create a comptatible deployment scripts from oooq
  include_tasks: ../upgrade/use_oooq.yaml
  args:
    apply:
      tags:
        - use_oooq
  tags: use_oooq
  when: use_oooq|bool
- name: Convert CLI options to parameters
  import_tasks: ../common/convert_cli_opts_params.yaml
- name: get auxiliary facts for major upgrades
  import_tasks: ../common/auxilary-facts.yaml
- name: Build extra templates based on new options.
  import_tasks: ../common/build_extra_template.yaml
  when: config_heat_extra_yaml is defined
- name: create undercloud update script
  template:
    src: "undercloud_upgrade.sh.j2"
    dest: "{{ undercloud_update_script }}"
    mode: "0775"
    force: true
- name: create scripts with workarounds
  template:
    src: "workarounds.sh.j2"
    dest: "{{ working_dir }}/{{ item }}.sh"
    mode: "0755"
    force: true
  loop:
    - 'pre_overcloud_update_prepare_workarounds'
    - 'post_overcloud_update_prepare_workarounds'
    - 'pre_overcloud_update_run_workarounds'
    - 'post_overcloud_update_run_workarounds'
  when: updates_workarounds|bool
- name: include l3 agent tasks
  import_tasks: ../common/create_l3_agent_connectivity_check_script.yml
- name: generate roles list from inventory file
  import_tasks: ../common/load_roles_from_inventory.yaml
- name: create overcloud update prepare script
  vars:
    old_img: "{{ working_dir }}/{{ container_registry_file }}"
  template:
    src: "overcloud_update_prepare.sh.j2"
    dest: "{{ overcloud_update_prepare_script }}"
    mode: "0775"
    force: true
- name: create overcloud ovn external update
  template:
    src: "ovn-external-update.sh.j2"
    dest: "{{ working_dir }}/ovn-external-update.sh"
    mode: "0775"
    force: true
- name: create overcloud_prepare_containers.sh script
  template:
    src: overcloud_prepare_containers.sh.j2
    dest: "{{ working_dir }}/overcloud_prepare_containers.sh"
    mode: "0755"
    force: true
- name: Create update log collection scripts
  include_tasks: ../common/create_log_collection_scripts.yml
  vars:
    log_current_stage: '{{ item }}'
  when:
    - log_stages|bool
  loop:
    - before_ovn_controller_update
    - before_ceph_update
    - before_reboot
- name: Create update run log collection for oc update run - batch
  include_tasks: ../common/create_log_collection_scripts.yml
  vars:
    log_current_stage: 'before_oc_update_run'
  when:
    - overcloud_batch_update|bool
    - log_stages|bool
- name: Create update run log collection for oc update run - serial
  include_tasks: ../common/create_log_collection_scripts.yml
  vars:
    log_current_stage: "before_oc_update_run_{{ item }}"
  when:
    - not overcloud_batch_update|bool
    - log_stages|bool
  loop: "{{ oc_roles|default(['all']) }}"
# One update-run script per role/hostgroup.
- name: create overcloud update script
  template:
    src: "overcloud_update_run.sh.j2"
    dest: "{{ overcloud_update_run_script_base }}-{{ item }}.sh"
    mode: "0775"
    force: true
  loop: "{{ oc_roles|default(['all']) }}"
- name: create online data migration script
  template:
    src: "overcloud_online_data_migration.sh.j2"
    dest: "{{ working_dir }}/overcloud_online_data_migration.sh"
    mode: "0755"
    force: true
- name: create docker container/images validate scripts
  template:
    src: "validate_docker_images_versions.sh.j2"
    dest: "{{ overcloud_validate_images_script }}"
    mode: "0755"
    force: true
- name: Create L3-agent failover scripts
  import_tasks: ../common/create_l3_agent_failover_check_script.yml
- name: create nova actions check script
  import_tasks: ../common/create_nova_actions_check_script.yml
- name: import create HTTP test scripts
  import_tasks: ../common/create_http_test_scripts.yml

View File

@ -1,29 +0,0 @@
---
# Generate the undercloud update script and its optional workaround scripts.
# Fix: file modes are quoted strings ("0775") -- unquoted 0775 is an octal
# integer in YAML 1.1 (decimal 509); quoting is the documented Ansible
# convention for the mode parameter.
- name: Ensure we skip Rhel Enforcement
  include_tasks: ../common/skip_rhel_enforcement.yaml
  when: not ( enforce_rhel|bool )
- name: create undercloud update script
  template:
    src: undercloud_upgrade.sh.j2
    dest: "{{ undercloud_update_script }}"
    mode: "0775"
    force: true
- name: create update workaround scripts for undercloud
  template:
    src: workarounds.sh.j2
    dest: "{{ working_dir }}/{{ item }}.sh"
    mode: "0775"
    force: true
  loop:
    - 'pre_undercloud_update_workarounds'
    - 'post_undercloud_update_workarounds'
  when: updates_workarounds|bool
- name: collect log for the current stage - batch
  include_tasks: ../common/create_log_collection_scripts.yml
  vars:
    log_current_stage: 'before_undercloud_update'
  when:
    - log_stages|bool

View File

@ -1,23 +0,0 @@
---
# Query pacemaker's stonith-enabled property on a controller; if fencing is
# on, disable it (both in fencing.yaml and live via pcs) for the duration of
# the update. enable_fencing.yaml restores it afterwards.
- name: verify if fencing is enabled before running overcloud update
  become: true
  become_user: "{{ (overcloud_ssh_user) | ternary(overcloud_ssh_user, 'tripleo-admin') }}"
  delegate_to: "{{ controller_host }}"
  shell: "sudo pcs property config stonith-enabled | tail -1"
  register: pcs_out
  changed_when: false
  failed_when: false
- block:
    - name: set EnableFencing to false in fencing.yaml
      replace:
        path: "{{ working_dir }}/fencing.yaml"
        regexp: "EnableFencing: true"
        replace: "EnableFencing: false"
    - name: disable fencing before running update
      become: true
      become_user: "{{ (overcloud_ssh_user) | ternary(overcloud_ssh_user, 'tripleo-admin') }}"
      delegate_to: "{{ controller_host }}"
      shell: "sudo pcs property set stonith-enabled=false"
  when: pcs_out.stdout.find('true') != -1

View File

@ -1,28 +0,0 @@
---
# Counterpart of disable_fencing.yaml: re-enable stonith after the update,
# but only when fencing.yaml exists (i.e. fencing was configured) and it is
# currently disabled.
- name: Check if fencing.yaml file is present
  stat:
    path: "{{ working_dir }}/fencing.yaml"
  register: fencing_file
- name: verify the fencing status after update
  become: true
  become_user: "{{ (overcloud_ssh_user) | ternary(overcloud_ssh_user, 'tripleo-admin') }}"
  delegate_to: "{{ controller_host }}"
  shell: "sudo pcs property config stonith-enabled | tail -1"
  register: pcs_out
  changed_when: false
  failed_when: false
- block:
    - name: set EnableFencing to true in fencing.yaml
      replace:
        path: "{{ working_dir }}/fencing.yaml"
        regexp: "EnableFencing: false"
        replace: "EnableFencing: true"
    - name: enable fencing after running update
      become: true
      become_user: "{{ (overcloud_ssh_user) | ternary(overcloud_ssh_user, 'tripleo-admin') }}"
      delegate_to: "{{ controller_host }}"
      shell: "sudo pcs property set stonith-enabled=true"
  when: pcs_out.stdout.find('false') != -1 and fencing_file.stat.exists

View File

@ -1,265 +0,0 @@
---
# Minor-update flow, undercloud half: generate scripts, then run the
# undercloud update (workarounds, prerequisites, validations, update,
# post-validation) when undercloud_update is set.
- name: create undercloud update scripts
  include_tasks: create-undercloud-update-scripts.yaml
  args:
    apply:
      tags:
        - create_undercloud_update_scripts
  tags: create_undercloud_update_scripts
  when: update_noop|bool or undercloud_update|bool
- block:
    - name: apply pre undercloud update workarounds
      shell: |
        set -o pipefail
        ./pre_undercloud_update_workarounds.sh 2>&1 {{ timestamper_cmd }} >> \
        pre_undercloud_update_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: updates_workarounds|bool
      tags:
        - undercloud_update
    - name: install/update required packages before updating the undercloud
      become: true
      become_user: root
      import_tasks: ../common/undercloud_prerequisites.yaml
    - name: run pre-update validation
      import_tasks: ../common/validation_group_run.yaml
      vars:
        validation_group: "pre-update"
        validation_allowed_groups: "{{ updates_validations_groups }}"
      when: run_validations|bool
      tags:
        - undercloud_update
        - updates_validations
        - pre_update_validations
    - name: collect log before undercloud update
      include_tasks: ../common/trigger_log.yml
      vars:
        log_current_stage: 'before_undercloud_update'
      when:
        - log_stages|bool
    - name: update undercloud
      shell: |
        set -o pipefail
        {{ undercloud_update_script }} 2>&1 {{ timestamper_cmd }} >> \
        undercloud_update.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      tags: undercloud_update
    - name: apply post undercloud update workarounds
      shell: |
        set -o pipefail
        ./post_undercloud_update_workarounds.sh 2>&1 {{ timestamper_cmd }} >> \
        post_undercloud_update_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: updates_workarounds|bool
      tags:
        - undercloud_update
    - name: validate undercloud update
      import_tasks: ../common/undercloud_validate_upgrade.yaml
      tags: undercloud_update_validate
  when: undercloud_update|bool
# Minor-update flow, overcloud half: prepare (heat outputs + containers),
# OVN controllers, per-role update run, Ceph, online data migrations, then
# post-update checks. Gated as a whole on overcloud_update.
- name: create overcloud update scripts
  include_tasks: create-overcloud-update-scripts.yaml
  args:
    apply:
      tags:
        - create_overcloud_update_scripts
  tags: create_overcloud_update_scripts
  when: update_noop|bool or overcloud_update|bool
- block:
    - name: apply pre overcloud update prepare workarounds
      shell: |
        set -o pipefail
        ./pre_overcloud_update_prepare_workarounds.sh 2>&1 {{ timestamper_cmd }} >> \
        pre_overcloud_update_prepare_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: updates_workarounds|bool
      tags:
        - overcloud_update_prepare
    - name: run pre-update-prepare validation
      import_tasks: ../common/validation_group_run.yaml
      vars:
        validation_group: "pre-update-prepare"
        validation_allowed_groups: "{{ updates_validations_groups }}"
      when: run_validations|bool
      tags:
        - overcloud_update_prepare
        - updates_validations
        - pre_update_prepare_validations
    # controller_host is used by the fencing tasks and must point at the
    # first node of the controller role.
    - set_fact:
        controller_host: "{{ inventory_hostmap[controller_role_name]|first }}"
    - name: run pre-update fencing check
      import_tasks: disable_fencing.yaml
    - name: import oc update prepare tasks
      import_tasks: overcloud_update_prepare.yml
      tags:
        - overcloud_update_prepare
    - name: apply post overcloud update prepare workarounds
      shell: |
        set -o pipefail
        ./post_overcloud_update_prepare_workarounds.sh 2>&1 {{ timestamper_cmd }} >> \
        post_overcloud_update_prepare_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: updates_workarounds|bool
      tags:
        - overcloud_update_prepare
    - name: prepare containers for overcloud update
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      shell: |
        set -o pipefail
        ./overcloud_prepare_containers.sh 2>&1 {{ timestamper_cmd }} >> \
        overcloud_update_prepare_containers.log
      tags:
        - overcloud_update_prepare_containers
    - name: collect log before OVN controller update
      include_tasks: ../common/trigger_log.yml
      vars:
        log_current_stage: 'before_ovn_controller_update'
      when:
        - log_stages|bool
    # OVN controllers are updated out-of-band, before the main update run.
    - name: Update OVN controllers.
      shell: |
        set -o pipefail
        ./ovn-external-update.sh 2>&1 {{ timestamper_cmd }} >> \
        ovn-external-update.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      tags:
        - overcloud_update_run
    - name: Ensure ansible-pacemaker module is present in CI.
      package:
        name: ansible-pacemaker
        state: latest
      when: tripleo_ci|default(false)|bool
      become: true
      tags:
        - overcloud_update_run
    - name: apply pre overcloud update run workarounds
      shell: |
        set -o pipefail
        ./pre_overcloud_update_run_workarounds.sh 2>&1 {{ timestamper_cmd }} >> \
        pre_overcloud_update_run_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: updates_workarounds|bool
      tags:
        - overcloud_update_run
    - name: run pre-update-run validation
      import_tasks: ../common/validation_group_run.yaml
      vars:
        validation_group: "pre-update-run"
        validation_allowed_groups: "{{ updates_validations_groups }}"
      when: run_validations|bool
      tags:
        - overcloud_update_run
        - updates_validations
        - pre_update_run_validations
    - name: import overcloud update run tasks
      import_tasks: overcloud_update_run.yml
      tags:
        - overcloud_update_run
    - name: apply post overcloud update run workarounds
      shell: |
        set -o pipefail
        ./post_overcloud_update_run_workarounds.sh 2>&1 {{ timestamper_cmd }} >> \
        post_overcloud_update_run_workarounds.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: updates_workarounds|bool
      tags:
        - overcloud_update_run
    - name: collect log before ceph update
      include_tasks: ../common/trigger_log.yml
      vars:
        log_current_stage: 'before_ceph_update'
      when:
        - log_stages|bool
    - name: update Ceph
      import_tasks: ceph_update_run.yml
      when: ceph_osd_enabled|bool
      tags:
        - ceph_update_run
    - name: run online data migration
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      shell: |
        set -o pipefail
        ./overcloud_online_data_migration.sh 2>&1 {{ timestamper_cmd }} >> \
        overcloud_update_data_migration.log
    - name: run post-update fencing check
      import_tasks: enable_fencing.yaml
    - name: collect log after update, but before reboot.
      include_tasks: ../common/trigger_log.yml
      vars:
        log_current_stage: 'before_reboot'
      when:
        - log_stages|bool
    - name: run post-update validation
      import_tasks: ../common/validation_group_run.yaml
      vars:
        validation_group: "post-update"
        validation_allowed_groups: "{{ updates_validations_groups }}"
      when: run_validations|bool
      tags:
        - updates_validations
        - post_update_validations
    - name: validate overcloud docker images/containers
      shell: |
        set -o pipefail
        {{ overcloud_validate_images_script }} 2>&1 {{ timestamper_cmd }} >> \
        validate_oc_images_containers.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
      when: overcloud_images_validate|bool
      tags:
        - overcloud_images_validate
    - name: import nova_actions_check tasks
      import_tasks: ../common/nova_actions_check.yml
  when: overcloud_update|bool

View File

@ -1,41 +0,0 @@
---
# Run the overcloud update prepare script; on failure, dump heat stack
# failure details (non-ephemeral heat only) before failing the play.
- name: setup HEAT outputs via update prepare
  shell: |
    set -o pipefail
    source {{ undercloud_rc }}
    {{ overcloud_update_prepare_script }} 2>&1 {{ timestamper_cmd }} >> \
    overcloud_update_prepare.log
  args:
    chdir: "{{ working_dir }}"
    executable: /usr/bin/bash
  register: overcloud_update_prepare
  ignore_errors: true
- block:
    - name: print stack failures
      shell: |
        set -o pipefail
        source {{ undercloud_rc }}
        openstack stack failures list --long {{ overcloud_stack_name }} {{ timestamper_cmd }} | \
        tee -a overcloud_failed_update_prepare.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
    - name: print resource list
      shell: |
        set -o pipefail
        source {{ undercloud_rc }}
        openstack stack resource list --filter status=FAILED --nested-depth 5 {{ overcloud_stack_name }} {{ timestamper_cmd }} | \
        tee -a overcloud_failed_prepare_resources.log
      args:
        chdir: "{{ working_dir }}"
        executable: /usr/bin/bash
  when:
    - overcloud_update_prepare.rc != 0
    - not ephemeral_heat|bool
# Explicit failure after diagnostics (the prepare task ignored errors).
- name: was the overcloud update prepare successful.
  fail: msg="Overcloud minor update preparation step failed..."
  when:
    - overcloud_update_prepare.rc != 0

View File

@ -1,22 +0,0 @@
---
# Dispatch the per-role update runs, either all roles in one batch
# (overcloud_batch_update) or serially one role at a time.
- name: collect log for the current stage - batch
  include_tasks: ../common/trigger_log.yml
  vars:
    log_current_stage: 'before_oc_update_run'
  when:
    - overcloud_batch_update|bool
    - log_stages|bool
- name: Are we running in parallel or serially ?
  debug:
    msg: "{{ (overcloud_batch_update|bool) | ternary('Running in parallel', 'Running serially') }}"
- name: generate roles list from inventory file
  import_tasks: ../common/load_roles_from_inventory.yaml
  when: not oc_roles
# batch(100) puts all roles in one chunk (parallel); batch(1) yields one
# role per iteration (serial).
- name: Run update.
  vars:
    oc_current_role: "{{ item }}"
  include_tasks: overcloud_update_run_role.yml
  loop: "{{ oc_roles|default(['all'])|batch((overcloud_batch_update|bool) | ternary(100, 1))|list }}"

View File

@ -1,42 +0,0 @@
---
- name: import tasks from l3_agent_connectivity_check_start_script
import_tasks: ../common/l3_agent_connectivity_check_start_script.yml
- name: collect log for the current stage - serial
include_tasks: ../common/trigger_log.yml
vars:
log_current_stage: "before_oc_update_run_{{ oc_current_role[0] }}"
when:
- not overcloud_batch_update|bool
- log_stages|bool
- name: run overcloud minor update in each of the roles/hostgroups
async: 25200
poll: 0
shell: |
set -o pipefail
{{ overcloud_update_run_script_base }}-{{ oc_running_role }}.sh 2>&1 {{ timestamper_cmd }} >> \
{{ overcloud_update_run_script_base }}-{{ oc_running_role }}.log
args:
chdir: "{{ working_dir }}"
executable: /usr/bin/bash
loop: "{{ oc_current_role }}"
loop_control:
loop_var: "oc_running_role"
register: overcloud_update_nodes
ignore_errors: true
- name: was the overcloud minor update successful?
async_status:
jid: "{{ async_result_item.ansible_job_id }}"
loop: "{{ overcloud_update_nodes.results }}"
loop_control:
loop_var: "async_result_item"
register: async_poll_results
until: async_poll_results.finished
retries: 25200
- name: import tasks from l3_agent_connectivity_check_stop_script
import_tasks: ../common/l3_agent_connectivity_check_stop_script.yml
vars:
current_stage_error: "{{ update_loss_threshold }}"

Some files were not shown because too many files have changed in this diff Show More