diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index 23d614a26..000000000
--- a/.coveragerc
+++ /dev/null
@@ -1,8 +0,0 @@
-[run]
-branch = True
-source = tripleoclient
-omit = tripleoclient/tests/*
-
-[report]
-ignore_errors = True
-show_missing = True
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index ee4e1236a..000000000
--- a/.gitignore
+++ /dev/null
@@ -1,74 +0,0 @@
-# Add patterns in here to exclude files created by tools integrated with this
-# repository, such as test frameworks from the project's recommended workflow,
-# rendered documentation and package builds.
-#
-# Don't add patterns to exclude files created by preferred personal tools
-# (editors, IDEs, your operating system itself even). These should instead be
-# maintained outside the repository, for example in a ~/.gitignore file added
-# with:
-#
-#   git config --global core.excludesfile '~/.gitignore'
-
-# Bytecompiled Python
-*.py[cod]
-
-# C extensions
-*.so
-
-# Packages
-*.egg
-*.egg-info
-dist
-build
-eggs
-.eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-lib
-lib64
-
-# Installer logs
-pip-log.txt
-install-undercloud.log
-
-# Unit test / coverage reports
-.coverage
-.tox
-nosetests.xml
-.testrepository
-.stestr/*
-cover/*
-MagicMock/*
-
-# Translations
-*.mo
-
-# Complexity
-output/*.html
-output/*/index.html
-
-# Sphinx
-doc/build
-
-# pbr generates these
-AUTHORS
-ChangeLog
-
-# Editors
-*~
-.*.swp
-.*sw?
-
-# Files created by releasenotes build
-releasenotes/build
-
-# generated config samples
-*.conf.sample
-
-# VSCode
-.vscode/*
-.devcontainer/*
diff --git a/.mailmap b/.mailmap
deleted file mode 100644
index cc92f17b8..000000000
--- a/.mailmap
+++ /dev/null
@@ -1,3 +0,0 @@
-# Format is:
-# <preferred e-mail> <other e-mail 1>
-# <preferred e-mail> <other e-mail 2>
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
deleted file mode 100644
index f41dd121b..000000000
--- a/.pre-commit-config.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-repos:
-  - repo: https://github.com/pycqa/flake8.git
-    rev: 3.8.4
-    hooks:
-      - id: flake8
-        language_version: python3
-        args: ['--config=setup.cfg']
-        pass_filenames: false
-  - repo: https://github.com/pre-commit/mirrors-pylint
-    rev: v2.6.0
-    hooks:
-      - id: pylint
diff --git a/.pylintrc b/.pylintrc
deleted file mode 100644
index 779e8a8c5..000000000
--- a/.pylintrc
+++ /dev/null
@@ -1,78 +0,0 @@
-[MESSAGES CONTROL]
-
-disable =
-    # TODO(ssbarnea): remove temporary skips added during initial adoption:
-    arguments-differ,
-    assignment-from-no-return,
-    attribute-defined-outside-init,
-    broad-except,
-    consider-iterating-dictionary,
-    consider-merging-isinstance,
-    consider-using-dict-comprehension,
-    consider-using-in,
-    consider-using-set-comprehension,
-    dangerous-default-value,
-    deprecated-method,
-    duplicate-code,
-    expression-not-assigned,
-    fixme,
-    global-statement,
-    import-error,
-    import-outside-toplevel,
-    inconsistent-return-statements,
-    invalid-name,
-    line-too-long,
-    logging-format-interpolation,
-    logging-not-lazy,
-    lost-exception,
-    missing-class-docstring,
-    missing-function-docstring,
-    missing-module-docstring,
-    no-else-break,
-    no-else-continue,
-    no-else-raise,
-    no-member,
-    no-self-use,
-    no-value-for-parameter,
-    protected-access,
-    raise-missing-from,
-    redefined-argument-from-local,
-    redefined-builtin,
-    redefined-outer-name,
-    reimported,
-    self-assigning-variable,
-    simplifiable-if-statement,
-    super-init-not-called,
-    super-with-arguments,
-    superfluous-parens,
-    too-few-public-methods,
-    too-many-ancestors,
-    too-many-arguments,
-    too-many-branches,
-    too-many-instance-attributes,
-    too-many-lines,
-    too-many-locals,
-    too-many-nested-blocks,
-    too-many-public-methods,
-    too-many-return-statements,
-    too-many-statements,
-    trailing-comma-tuple,
-    try-except-raise,
-    undefined-loop-variable,
-    ungrouped-imports,
-    unidiomatic-typecheck,
-    unnecessary-comprehension,
-    unnecessary-lambda,
-    unnecessary-pass,
-    unsubscriptable-object,
-    unused-argument,
-    unused-import,
-    unused-variable,
-    useless-else-on-loop,
-    useless-object-inheritance,
-    useless-super-delegation,
-    wrong-import-order,
-    wrong-import-position
-
-[REPORTS]
-output-format = colorized
diff --git a/.stestr.conf b/.stestr.conf
deleted file mode 100644
index b8379e251..000000000
--- a/.stestr.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-test_path=${TEST_PATH:-./tripleoclient/tests}
-top_dir=./
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index f8a60d601..000000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-If you would like to contribute to the development of OpenStack,
-you must follow the steps in the "If you're a developer, start here"
-section of the `How To Contribute Guide <https://docs.openstack.org/infra/manual/developers.html>`_.
-
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the documented `Development Workflow <https://docs.openstack.org/infra/manual/developers.html#development-workflow>`_.
-
-Pull requests submitted through GitHub will be ignored.
-
-Bugs should be filed on `TripleO at Launchpad <https://launchpad.net/tripleo>`_,
-not GitHub.
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 67db85882..000000000
--- a/LICENSE
+++ /dev/null
@@ -1,175 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
diff --git a/README.rst b/README.rst
index 4e5f7e7ce..4ee2c5f13 100644
--- a/README.rst
+++ b/README.rst
@@ -1,23 +1,10 @@
-===================
-About tripleoclient
-===================
+This project is no longer maintained.
 
-General information
--------------------
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
 
-**tripleoclient** is an OpenStackClient (OSC) plugin implementation that
-implements commands useful for TripleO and the install and management of
-both an undercloud and an overcloud.
-
-See the
-`TripleO Documentation <https://docs.openstack.org/tripleo-docs/latest/>`_
-for details on using tripleoclient.
-
-See the
-`Release Notes <https://docs.openstack.org/releasenotes/python-tripleoclient/>`_
-
-Team and repository tags
-------------------------
-
-.. image:: https://governance.openstack.org/tc/badges/python-tripleoclient.svg
-   :target: https://governance.openstack.org/tc/reference/tags/index.html
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+OFTC.
diff --git a/bindep.txt b/bindep.txt
deleted file mode 100644
index 87cbb7797..000000000
--- a/bindep.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# This is a cross-platform list tracking distribution packages needed by tests;
-# see https://docs.openstack.org/infra/bindep/ for additional information.
-
-hostname [platform:rpm test]
-xfsprogs [platform:rpm test]
-qemu-img [platform:rpm test]
-dosfstools [platform:rpm test]
-libffi-dev [platform:dpkg]
-libffi-devel [platform:rpm]
-libssl-dev [platform:dpkg test]
-openssl-devel [platform:rpm test]
-policycoreutils-python [platform:rpm test !platform:rhel-8 !platform:centos-8 !platform:fedora !platform:centos-9 !platform:rhel-9]
-policycoreutils-python-utils [platform:rpm test !platform:rhel-7 !platform:centos-7]
-tripleo-ansible [platform:rpm]
diff --git a/config-generator/standalone.conf b/config-generator/standalone.conf
deleted file mode 100644
index 46e272c44..000000000
--- a/config-generator/standalone.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-output_file = standalone.conf.sample
-namespace = standalone_config
diff --git a/config-generator/undercloud.conf b/config-generator/undercloud.conf
deleted file mode 100644
index af79e55d2..000000000
--- a/config-generator/undercloud.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-output_file = undercloud.conf.sample
-namespace = undercloud_config
diff --git a/doc/requirements.txt b/doc/requirements.txt
deleted file mode 100644
index 640fbd022..000000000
--- a/doc/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-sphinx>=2.0.0,!=2.1.0 # BSD
-sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
-openstackdocstheme>=2.2.1 # Apache-2.0
-reno>=3.1.0 # Apache-2.0
diff --git a/doc/source/commands.rst b/doc/source/commands.rst
deleted file mode 100644
index ac6ec0666..000000000
--- a/doc/source/commands.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-======================
-All Overcloud Commands
-======================
-
-.. autoprogram-cliff:: openstack.tripleoclient.v2
-   :command: overcloud *
-
-==================
-Container Commands
-==================
-
-.. autoprogram-cliff:: openstack.tripleoclient.v2
-   :command: tripleo container *
-
-===================
-Undercloud Commands
-===================
-
-.. autoprogram-cliff:: openstack.tripleoclient.v2
-   :command: undercloud *
-
-===================
-Standalone Commands
-===================
-
-.. autoprogram-cliff:: openstack.tripleoclient.v2
-   :command: tripleo deploy
-
-.. autoprogram-cliff:: openstack.tripleoclient.v2
-   :command: tripleo upgrade
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100644
index a3c9f774e..000000000
--- a/doc/source/conf.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('../..'))
-# -- General configuration ----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
-    'sphinx.ext.autodoc',
-    'openstackdocstheme',
-    'cliff.sphinxext',
-    'sphinxcontrib.rsvgconverter',
-]
-
-# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
-latex_use_xindy = False
-
-# autodoc generation is a bit aggressive and a nuisance when doing heavy
-# text edit cycles.
-# execute "export SPHINX_DEBUG=1" in your terminal to disable
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-copyright = '2017 Red Hat, Inc.'
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = True
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'native'
-
-suppress_warnings = ['image.nonlocal_uri']
-
-# -- Options for HTML output --------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-# html_theme_path = ["."]
-html_theme = 'openstackdocs'
-# html_static_path = ['static']
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'tripleoclientdoc'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass
-# [howto/manual]).
-latex_documents = [
-    ('index',
-     'doc-python-tripleoclient.tex',
-     'Tripleoclient Documentation',
-     'OpenStack Foundation', 'manual'),
-]
-
-# Allow deeper levels of nesting for \begin...\end stanzas
-latex_elements = {'maxlistdepth': 10, 'extraclassoptions': ',openany,oneside'}
-
-# openstackdocstheme options
-openstackdocs_repo_name = 'openstack/python-tripleoclient'
-openstackdocs_pdf_link = True
-openstackdocs_bug_project = 'python-tripleoclient'
-openstackdocs_bug_tag = ''
-
-# Last updated timestamp
-autoprogram_cliff_application = 'openstack'
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
deleted file mode 100644
index a1d5a6e16..000000000
--- a/doc/source/contributing.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Contributing
-============
-
-.. include:: ../../CONTRIBUTING.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 6371f78ab..000000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-=============
-Tripleoclient
-=============
-
-.. toctree::
-   :maxdepth: 2
-
-   readme
-   contributing
-   installation
-   usage
-   commands
-
-.. only:: html
-
-   Indices and tables
-   ==================
-
-   * :ref:`genindex`
-   * :ref:`search`
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
deleted file mode 100644
index bd636c476..000000000
--- a/doc/source/installation.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-============
-Installation
-============
-
-At the command line::
-
-    $ pip install python-tripleoclient
-
-Or, if you have virtualenvwrapper installed::
-
-    $ mkvirtualenv python-tripleoclient
-    $ pip install python-tripleoclient
diff --git a/doc/source/readme.rst b/doc/source/readme.rst
deleted file mode 100644
index a6210d3d8..000000000
--- a/doc/source/readme.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../../README.rst
diff --git a/doc/source/usage.rst b/doc/source/usage.rst
deleted file mode 100644
index 204cf8ec1..000000000
--- a/doc/source/usage.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-=====
-Usage
-=====
-
-To use tripleoclient in a project::
-
-    import tripleoclient
diff --git a/releasenotes/notes/5.8.0-9f2df7e7dfcfbc42.yaml b/releasenotes/notes/5.8.0-9f2df7e7dfcfbc42.yaml
deleted file mode 100644
index 948b25579..000000000
--- a/releasenotes/notes/5.8.0-9f2df7e7dfcfbc42.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-prelude: >
-    6.0.0 is the final release for Ocata.
-    It's the first release where release notes are added.
-features:
-  - Use the overcloudrc generated in a Mistral action so both
-    CLI and UI can use the same file when interacting with the
-    deployed OpenStack.
-  - Default image build to use yaml files.
-    If no args are given to the overcloud image build, it will
-    default to using the CentOS yaml files in
-    /usr/share/openstack-tripleo-common/image-yaml.
-  - Simplify fetching the passwords from Mistral.
-  - Add --disable-validations argument that will disable the run
-    of validations completely. This feature is useful when we deploy
-    TripleO with multinode and deployed-server features.
-fixes:
-  - Fixes `bug 1649588
    <https://bugs.launchpad.net/tripleo/+bug/1649588>`__ so exceptions are not
-    swallowed anymore, which was leading to a 0 exit code.
-    Now, it returns a proper exit code when plan deletion fails.
-  - Add new hiera agent hook to legacy image build.
-    The change Ia1864933235152b7e899c4442534879f8e22240d added these to the
-    newer overcloud-images.yaml method of building images. Unfortunately,
-    because the old 'openstack overcloud image build --all' method does not
-    leverage this file yet, it leads to a timeout in deployment because the
-    heat agents are not available.
-    It fixes `bug 1651616
    <https://bugs.launchpad.net/tripleo/+bug/1651616>`__.
-  - Fixes `bug 1637474
    <https://bugs.launchpad.net/tripleo/+bug/1637474>`__ so we manage the
-    --initial-state flag for register_or_update.
-  - Fixes `bug 1648861
    <https://bugs.launchpad.net/tripleo/+bug/1648861>`__ so that if the plan
-    creation fails, the Swift container isn't created.
other:
-  - Remove the keystone_pki cert generation, which is not needed anymore.
diff --git a/releasenotes/notes/Add_parameters_and_create-vars-file_arguments_to_the_list_subcommand-2e0944e5440c4216.yaml b/releasenotes/notes/Add_parameters_and_create-vars-file_arguments_to_the_list_subcommand-2e0944e5440c4216.yaml
deleted file mode 100644
index 87fe3223d..000000000
--- a/releasenotes/notes/Add_parameters_and_create-vars-file_arguments_to_the_list_subcommand-2e0944e5440c4216.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    The 'openstack tripleo validator list' subcommand can now display all the
-    available parameters for the validations using the new --parameters
-    argument and extract them to a file using the new --create-vars-file argument.
diff --git a/releasenotes/notes/Allow-running-validations-by-name-using-Ansible-by-default-2dac0dfd9c7a4690.yaml b/releasenotes/notes/Allow-running-validations-by-name-using-Ansible-by-default-2dac0dfd9c7a4690.yaml
deleted file mode 100644
index 9c2b6328d..000000000
--- a/releasenotes/notes/Allow-running-validations-by-name-using-Ansible-by-default-2dac0dfd9c7a4690.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    The validations can now be performed by calling Mistral or by calling
-    ``ansible-playbook``. By default, the latter is used. The new ``--use-mistral``
-    option allows executing either groups or a set of specific validations by
-    calling Mistral instead of using the default mechanism, i.e. ``ansible-playbook``.
-
diff --git a/releasenotes/notes/Swift-encryption-for-the-undercloud-9d9e62205fc54531.yaml b/releasenotes/notes/Swift-encryption-for-the-undercloud-9d9e62205fc54531.yaml
deleted file mode 100644
index b35b81901..000000000
--- a/releasenotes/notes/Swift-encryption-for-the-undercloud-9d9e62205fc54531.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    The option `enable_swift_encryption` was added to the containerized
-    undercloud configuration options (undercloud.conf). If enabled, it will
-    deploy Barbican, which will be used to enable Swift Object encryption.
diff --git a/releasenotes/notes/TLS-by-default-for-undercloud_config-f8cdcf206de51b3c.yaml b/releasenotes/notes/TLS-by-default-for-undercloud_config-f8cdcf206de51b3c.yaml
deleted file mode 100644
index 7bde30e6b..000000000
--- a/releasenotes/notes/TLS-by-default-for-undercloud_config-f8cdcf206de51b3c.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    TLS is now used by default for the containerized undercloud. This is done
-    by setting the ``generate_service_certificate`` parameter to True by
-    default.
diff --git a/releasenotes/notes/add-architecture-option-1fca9e53bd59d353.yaml b/releasenotes/notes/add-architecture-option-1fca9e53bd59d353.yaml
deleted file mode 100644
index 1c59a6cb3..000000000
--- a/releasenotes/notes/add-architecture-option-1fca9e53bd59d353.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    In order to allow overcloud and deploy images to vary based on architecture,
-    add a ``--architecture`` option to ``openstack overcloud image upload``.
-    This option will add hw_architecture to the image meta-data, which will
-    then be used by nova to limit node selection to matching CPU architectures.
diff --git a/releasenotes/notes/add-check-for-disable-upgrade-deployment-flag-f074554e47e85b27.yaml b/releasenotes/notes/add-check-for-disable-upgrade-deployment-flag-f074554e47e85b27.yaml
deleted file mode 100644
index dc796b7da..000000000
--- a/releasenotes/notes/add-check-for-disable-upgrade-deployment-flag-f074554e47e85b27.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-upgrade:
-  - If the operator specifies a roles_data.yaml for the deployment or upgrade,
-    this adds a check that the disable_upgrade_deployment flag is set at least
-    once in that file and otherwise logs a warning. If the
-    validation-warnings-fatal parameter is set to True (default is False) then
-    this check will also raise an InvalidConfiguration exception.
-
diff --git a/releasenotes/notes/add-enable_neutron_heat-8d799d8ffc76f6da.yaml b/releasenotes/notes/add-enable_neutron_heat-8d799d8ffc76f6da.yaml
deleted file mode 100644
index ac91a2e20..000000000
--- a/releasenotes/notes/add-enable_neutron_heat-8d799d8ffc76f6da.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - New configuration options for enable_neutron and enable_heat are added to
-    the standalone and undercloud installers. These options default to true,
-    and can be used to selectively disable these services.
diff --git a/releasenotes/notes/add-fencing-parameter-generation-c0ae21e0fee4f350.yaml b/releasenotes/notes/add-fencing-parameter-generation-c0ae21e0fee4f350.yaml
deleted file mode 100644
index 4bdac9eb3..000000000
--- a/releasenotes/notes/add-fencing-parameter-generation-c0ae21e0fee4f350.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Adds `overcloud generate fencing` command, which outputs an environment
-    file that can be used to configure node fencing in HA deployments.
-    Currently IPMI and virtual (non-production) deployments are supported.
diff --git a/releasenotes/notes/add-ffu-cli-48e6039749f1fcdb.yaml b/releasenotes/notes/add-ffu-cli-48e6039749f1fcdb.yaml
deleted file mode 100644
index 16ff7e6e2..000000000
--- a/releasenotes/notes/add-ffu-cli-48e6039749f1fcdb.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-features:
-  - |
-    Adds a CLI for fast forward upgrades, in particular the following:
-
-    openstack overcloud ffwd-upgrade prepare
-    openstack overcloud ffwd-upgrade run
-    openstack overcloud ffwd-upgrade converge
-
-    These are meant to be the first, second and final steps in the fast-forward
-    upgrade workflow. See the ffwd upgrade docs for more information on how to
-    use these CLI commands; the list of parameters is available with
-    openstack overcloud ffwd-upgrade [prepare,run,converge] --help
diff --git a/releasenotes/notes/add-frr-service-option-71c20b6512f435dc.yaml b/releasenotes/notes/add-frr-service-option-71c20b6512f435dc.yaml
deleted file mode 100644
index dbedde817..000000000
--- a/releasenotes/notes/add-frr-service-option-71c20b6512f435dc.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Added option ``enable_frr`` to enable the TripleO FRR service in standalone
-    and undercloud deployments.
diff --git a/releasenotes/notes/add-heat-type-cli-arg-2fa4f47a835aafea.yaml b/releasenotes/notes/add-heat-type-cli-arg-2fa4f47a835aafea.yaml
deleted file mode 100644
index 274a79b47..000000000
--- a/releasenotes/notes/add-heat-type-cli-arg-2fa4f47a835aafea.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    A new CLI argument, --heat-type, is added to openstack overcloud deploy.
-    Available options are "installed", "pod", "container", and "native". The
-    default is "installed". The argument specifies the type of Heat process to
-    use for the deployment.
diff --git a/releasenotes/notes/add-ipa-cleanup-to-overcloud-delete-bf803bc67a4b38c2.yaml b/releasenotes/notes/add-ipa-cleanup-to-overcloud-delete-bf803bc67a4b38c2.yaml
deleted file mode 100644
index 2e123b311..000000000
--- a/releasenotes/notes/add-ipa-cleanup-to-overcloud-delete-bf803bc67a4b38c2.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-features:
-  - |
-    The `overcloud delete` subcommand now supports cleaning up overcloud hosts,
-    services, and DNS entries in FreeIPA. This is applicable to deployments
-    with TLS support enabled, since FreeIPA serves DNS and manages certificates
-    for overcloud infrastructure. This subcommand also includes a new option
-    called ``--skip-ipa-cleanup`` that allows the caller to forego cleaning up
-    FreeIPA. This may be useful when deployers want to forcibly clean up
-    overcloud stacks and leave FreeIPA entries intact (e.g., network partition
-    events where the FreeIPA server isn't reachable). Note that you will need
-    to manually clean up FreeIPA if you use ``--skip-ipa-cleanup``.
diff --git a/releasenotes/notes/add-ironic-bios-interface-3fdd5587a60fdb31.yaml b/releasenotes/notes/add-ironic-bios-interface-3fdd5587a60fdb31.yaml
deleted file mode 100644
index d7652ca55..000000000
--- a/releasenotes/notes/add-ironic-bios-interface-3fdd5587a60fdb31.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Automatically enables ironic BIOS management interfaces based on enabled
-    ironic hardware types supporting BIOS management.
diff --git a/releasenotes/notes/add-ironic-inspect-interface-90b6ff6cfc052eb4.yaml b/releasenotes/notes/add-ironic-inspect-interface-90b6ff6cfc052eb4.yaml
deleted file mode 100644
index bae67a9d9..000000000
--- a/releasenotes/notes/add-ironic-inspect-interface-90b6ff6cfc052eb4.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Automatically enables ironic inspection interfaces for enabled ironic
-    hardware types supporting out-of-band inspection.
diff --git a/releasenotes/notes/add-network-unprovision-overcloud-delete-5c36ff706ab809ea.yaml b/releasenotes/notes/add-network-unprovision-overcloud-delete-5c36ff706ab809ea.yaml
deleted file mode 100644
index c4cd90512..000000000
--- a/releasenotes/notes/add-network-unprovision-overcloud-delete-5c36ff706ab809ea.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-features:
-  - |
-    Added options for "overcloud delete" command to unprovision
-    networks provisioned with "overcloud deploy" or with
-    "overcloud network provision".
diff --git a/releasenotes/notes/add-networks-data-support-to-standalone-c06e29b44f44b6d5.yaml b/releasenotes/notes/add-networks-data-support-to-standalone-c06e29b44f44b6d5.yaml
deleted file mode 100644
index 791fc9d94..000000000
--- a/releasenotes/notes/add-networks-data-support-to-standalone-c06e29b44f44b6d5.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    Standalone deployment now supports custom networks data
-    (``network_data.yaml``). By default Standalone deploys with no networks
-    data (all services on the ctlplane network). The new option
-    ``networks_file`` can be used to provide custom networks data.
diff --git a/releasenotes/notes/add-node-unprovision-overcloud-delete-c7dd063912d4ebca.yaml b/releasenotes/notes/add-node-unprovision-overcloud-delete-c7dd063912d4ebca.yaml
deleted file mode 100644
index fe6c1bff2..000000000
--- a/releasenotes/notes/add-node-unprovision-overcloud-delete-c7dd063912d4ebca.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Added options for "overcloud delete" command to unprovision nodes
-    and network ports provisioned with "overcloud deploy".
diff --git a/releasenotes/notes/add-platform-option-97d92380b9ff52f1.yaml b/releasenotes/notes/add-platform-option-97d92380b9ff52f1.yaml
deleted file mode 100644
index 6b32d61ef..000000000
--- a/releasenotes/notes/add-platform-option-97d92380b9ff52f1.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    In certain situations it may be desirable to provide optimised overcloud
-    images for deployed nodes. In order to achieve this, add a ``--platform``
-    option to ``openstack overcloud image upload``. This option will then be
-    used to select appropriate images based on the combination of architecture
-    and platform.
diff --git a/releasenotes/notes/add-roles-and-networks-answers-f23927c9075c7e99.yaml b/releasenotes/notes/add-roles-and-networks-answers-f23927c9075c7e99.yaml
deleted file mode 100644
index 2105084dd..000000000
--- a/releasenotes/notes/add-roles-and-networks-answers-f23927c9075c7e99.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Users can now specify roles_data.yaml and network_data.yaml locations
-    in their answers file, using `networks` and `roles` along with the
-    `templates` and `environments` arguments.
diff --git a/releasenotes/notes/add-skip-tags-overcloud-upgrade-run-6aaf5925ffc02359.yaml b/releasenotes/notes/add-skip-tags-overcloud-upgrade-run-6aaf5925ffc02359.yaml
deleted file mode 100644
index 5b020b0c4..000000000
--- a/releasenotes/notes/add-skip-tags-overcloud-upgrade-run-6aaf5925ffc02359.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-upgrade:
-  - |
-    This adds a --skip-tags parameter to the openstack overcloud upgrade run
-    command:
-
-    .. code-block:: bash
-
-      openstack overcloud upgrade run --nodes compute-0 --skip-tags validation
-
-    This is useful for skipping those step 0 tasks (tagged "validation") that
-    check if services are running before allowing the upgrade to proceed, especially
-    if you must re-run the upgrade after a failed attempt and some services
-    cannot easily be started. The currently supported values for this are
-    validation and pre-upgrade, and they can be combined as "--skip-tags
-    'validation,pre-upgrade'" if required.
diff --git a/releasenotes/notes/add-uc-reproduce-command-65daa4386142fcd1.yaml b/releasenotes/notes/add-uc-reproduce-command-65daa4386142fcd1.yaml
deleted file mode 100644
index f6e09fd66..000000000
--- a/releasenotes/notes/add-uc-reproduce-command-65daa4386142fcd1.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - |
-    Expose the existing --reproduce-command from the `openstack tripleo deploy`
-    CLI in the Undercloud CLI commands.
-    A new CLI option --reproduce-command is available for the `openstack
-    undercloud install` and `openstack undercloud upgrade` commands, which
-    creates a script, named ansible-playbook-command.sh, in the Undercloud's
-    deployment artifacts directory. This script allows running the Ansible
-    playbooks for deployment or upgrade in the same way the CLI command does.
diff --git a/releasenotes/notes/add-undercloud_enable_selinux-configuration-67d2fb994496ef16.yaml b/releasenotes/notes/add-undercloud_enable_selinux-configuration-67d2fb994496ef16.yaml
deleted file mode 100644
index 1f3d45223..000000000
--- a/releasenotes/notes/add-undercloud_enable_selinux-configuration-67d2fb994496ef16.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Add `undercloud_enable_selinux` configuration to the undercloud.conf.
-    This is a boolean option to enable or disable SELinux during the
-    undercloud installation.
diff --git a/releasenotes/notes/add-user-confirmation-to-node-delete-ca8c240bfd71c0ba.yaml b/releasenotes/notes/add-user-confirmation-to-node-delete-ca8c240bfd71c0ba.yaml
deleted file mode 100644
index 09c5fe459..000000000
--- a/releasenotes/notes/add-user-confirmation-to-node-delete-ca8c240bfd71c0ba.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Added a y/n prompt to the node delete command to prevent a user from
-    accidentally executing a node delete action, since it's a destructive
-    action.
diff --git a/releasenotes/notes/add_limit_args_for_admin_authorize-2fe6945515dd34a7.yaml b/releasenotes/notes/add_limit_args_for_admin_authorize-2fe6945515dd34a7.yaml
deleted file mode 100644
index 94ad14626..000000000
--- a/releasenotes/notes/add_limit_args_for_admin_authorize-2fe6945515dd34a7.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - |
-    The Admin Authorize command can now be targeted at
-    specific nodes using '--limit'. It can also take a
-    custom static-inventory using '--static-inventory'.
-fixes:
-  - |
-    Fixes Admin Authorize to work with Ephemeral Heat.
diff --git a/releasenotes/notes/add_save_swift_parameter_to_undercloud_backup-894e0bb4b3562a78.yaml b/releasenotes/notes/add_save_swift_parameter_to_undercloud_backup-894e0bb4b3562a78.yaml
deleted file mode 100644
index e84ce3480..000000000
--- a/releasenotes/notes/add_save_swift_parameter_to_undercloud_backup-894e0bb4b3562a78.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    Adds a new --save-swift parameter to undercloud-backup. Previously, the
-    backup would always be saved in Swift, so each new backup contained the
-    previous one, growing exponentially in size. The default is false, which
-    saves the backup to the filesystem instead.
diff --git a/releasenotes/notes/admin_authorize_deprecate-overcloud-ssh-network-a2a1bdf745c4796c.yaml b/releasenotes/notes/admin_authorize_deprecate-overcloud-ssh-network-a2a1bdf745c4796c.yaml
deleted file mode 100644
index 05af2cbe9..000000000
--- a/releasenotes/notes/admin_authorize_deprecate-overcloud-ssh-network-a2a1bdf745c4796c.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
-  - |
-    The ``--overcloud-ssh-network`` option of the ``openstack admin authorize``
-    command has been deprecated. This option has no effect.
diff --git a/releasenotes/notes/allow-running-validations-with-custom-extra-variables-12c7277b30eb791d.yaml b/releasenotes/notes/allow-running-validations-with-custom-extra-variables-12c7277b30eb791d.yaml
deleted file mode 100644
index ad191a8ba..000000000
--- a/releasenotes/notes/allow-running-validations-with-custom-extra-variables-12c7277b30eb791d.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    The operator is now able to pass extra variables while executing validations
-    through the command line. The command line will accept either a Dict with
-    the new --extra-vars argument, or the absolute path of a file (JSON or YAML
-    when using ansible, and JSON only when using Mistral) with the new
-    --extra-vars-file argument.
diff --git a/releasenotes/notes/ansible-default-config-920461117f0bd427.yaml b/releasenotes/notes/ansible-default-config-920461117f0bd427.yaml
deleted file mode 100644
index f6cb00f9a..000000000
--- a/releasenotes/notes/ansible-default-config-920461117f0bd427.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-features:
-  - |
-    ``tripleo config generate ansible`` generates the default ``ansible.cfg``
-    in the given ``--output-dir`` (defaults to `$HOME`). The remote user
-    setting for ansible will be set to the ``--deployment-user`` value
-    (defaults to 'stack').
-
-    .. note:: Do not confuse the generated config with ``~/.ansible.cfg``.
-              The latter takes lower precedence.
-
-    You may want to customize the generated config so it will be used
-    with all undercloud and standalone deployments.
-
-    .. note:: Overcloud deployments use Mistral workflows to configure ansible
-              for its own use, but the basic configuration it takes looks very
-              similar.
diff --git a/releasenotes/notes/ansible-forks-arg-9f7b439e4b6980dd.yaml b/releasenotes/notes/ansible-forks-arg-9f7b439e4b6980dd.yaml
deleted file mode 100644
index 7f2dcbb1d..000000000
--- a/releasenotes/notes/ansible-forks-arg-9f7b439e4b6980dd.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    A new `--ansible-forks` argument has been added to the TripleO and Overcloud
-    commands. The default value for forks has also been adjusted to no longer
-    exceed 100 forks.
diff --git a/releasenotes/notes/ansible_connection_timeout-78e45f20ff91f672.yaml b/releasenotes/notes/ansible_connection_timeout-78e45f20ff91f672.yaml
deleted file mode 100644
index 12e75f80f..000000000
--- a/releasenotes/notes/ansible_connection_timeout-78e45f20ff91f672.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-features:
-  - |
-    `openstack overcloud node delete` can now take
-    `--overcloud-ssh-port-timeout` to configure the connection timeout
-    for Ansible, while `--timeout` configures the command timeout as expected.
-fixes:
-  - |
-    The Ansible connection timeout used for config-download and the deployment
-    timeout will now be given proper values. This fixes `bug 1868063
    <https://bugs.launchpad.net/tripleo/+bug/1868063>`__.
diff --git a/releasenotes/notes/block-uc-upgrade-for-network-plugin-change-ba8459b171b8e37e.yaml b/releasenotes/notes/block-uc-upgrade-for-network-plugin-change-ba8459b171b8e37e.yaml
deleted file mode 100644
index 19c9d5e92..000000000
--- a/releasenotes/notes/block-uc-upgrade-for-network-plugin-change-ba8459b171b8e37e.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-upgrade:
-  - |
-    Undercloud upgrade is not allowed if a change in network plugin,
-    i.e. OVS to OVN or OVN to OVS, is detected. Such a change would break
-    the undercloud: just switching is not enough, as it also requires
-    network resources to be migrated.
diff --git a/releasenotes/notes/boot_mode-ef25d1a032dcae56.yaml b/releasenotes/notes/boot_mode-ef25d1a032dcae56.yaml
deleted file mode 100644
index 319586599..000000000
--- a/releasenotes/notes/boot_mode-ef25d1a032dcae56.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    The commands `openstack overcloud node import` and `openstack overcloud node
-    configure` now have a --boot-mode argument which allows the boot mode for
-    all affected nodes to be set to UEFI boot (uefi) or legacy BIOS boot (bios).
-    This allows some nodes to have a different boot mode to the default (uefi).
\ No newline at end of file
diff --git a/releasenotes/notes/bug-1743575-dcacfa668eaf51a6.yaml b/releasenotes/notes/bug-1743575-dcacfa668eaf51a6.yaml
deleted file mode 100644
index 94bde9358..000000000
--- a/releasenotes/notes/bug-1743575-dcacfa668eaf51a6.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    This patch fixes https://bugs.launchpad.net/tripleo/+bug/1743575
-    The bug prevents a successful stack creation when using a custom role with
-    a composable service.
diff --git a/releasenotes/notes/bug-1996567-d45f9a25c31c14d2.yaml b/releasenotes/notes/bug-1996567-d45f9a25c31c14d2.yaml
deleted file mode 100644
index 7bdba9a7c..000000000
--- a/releasenotes/notes/bug-1996567-d45f9a25c31c14d2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Now the ``openstack overcloud image build`` command uses CentOS Stream 9 as
-    the base OS by default.
diff --git a/releasenotes/notes/bulk-status-missing-50cc60281c6e20f5.yaml b/releasenotes/notes/bulk-status-missing-50cc60281c6e20f5.yaml
deleted file mode 100644
index 84565f183..000000000
--- a/releasenotes/notes/bulk-status-missing-50cc60281c6e20f5.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    The ``introspection bulk status`` command no longer aborts if some nodes
-    in the Ironic registry were never introspected. See bug `1689540
    <https://bugs.launchpad.net/tripleo/+bug/1689540>`_.
diff --git a/releasenotes/notes/capture-environment-status-and-logs-5f7f0f287d8465c5.yaml b/releasenotes/notes/capture-environment-status-and-logs-5f7f0f287d8465c5.yaml
deleted file mode 100644
index 3a92081c4..000000000
--- a/releasenotes/notes/capture-environment-status-and-logs-5f7f0f287d8465c5.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    Implemented new 'openstack overcloud support report' command to execute
-    a log collection and retrieval against overcloud nodes. This new command
-    allows an operator to perform sosreport retrieval from all nodes or
-    specific nodes based on their server name.
diff --git a/releasenotes/notes/ceph_daemon_option-47a75a2c8b5ce5bb.yaml b/releasenotes/notes/ceph_daemon_option-47a75a2c8b5ce5bb.yaml
deleted file mode 100644
index d9c001554..000000000
--- a/releasenotes/notes/ceph_daemon_option-47a75a2c8b5ce5bb.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-features:
-  - |
-    A new option --daemons for the "openstack overcloud ceph deploy" command
-    has been added. This option may be used to define additional Ceph daemons
-    that should be deployed at this stage.
-    For instance, a generic Ceph daemons definition can be something like the
-    following::
-
-      ---
-      ceph_nfs:
-        cephfs_data: 'manila_data'
-        cephfs_metadata: 'manila_metadata'
-      ceph_rgw: {}
-      ceph_ingress:
-        tripleo_cephadm_haproxy_container_image: undercloud.ctlplane.mydomain.tld:8787/ceph/haproxy:2.3
-        tripleo_cephadm_keepalived_container_image: undercloud.ctlplane.mydomain.tld:8787/ceph/keepalived:2.5.1
-
-    For each service added to the data structure above, additional options can
-    be defined and passed as extra_vars to the tripleo-ansible flow.
-    If no option is specified, the default values provided by the cephadm
-    tripleo-ansible role will be used.
diff --git a/releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml b/releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml
deleted file mode 100644
index cb888e033..000000000
--- a/releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-features:
-  - |
-    Two new commands, "openstack overcloud ceph user enable" and
-    "openstack overcloud ceph user disable" are added. The "enable"
-    option will create the cephadm SSH user and distribute their
-    SSH keys to Ceph nodes in the overcloud. The "disable" option
-    may be run after "openstack overcloud ceph deploy" has been run
-    to disable cephadm so that it may not be used to administer the
-    Ceph cluster and no "ceph orch ..." CLI commands will function.
-    This will also prevent Ceph node overcloud scale operations, though
-    the Ceph cluster will still be able to read/write data. The "ceph
-    user disable" option will also remove the public and private SSH
-    keys of the cephadm SSH user on overclouds which host Ceph. The
-    "ceph user enable" option may also be used to re-distribute the
-    public and private SSH keys of the cephadm SSH user and re-enable
-    the cephadm mgr module.
\ No newline at end of file
diff --git a/releasenotes/notes/ceph_vip_provisioning-dcac72d62c70c57c.yaml b/releasenotes/notes/ceph_vip_provisioning-dcac72d62c70c57c.yaml
deleted file mode 100644
index 5f3012f76..000000000
--- a/releasenotes/notes/ceph_vip_provisioning-dcac72d62c70c57c.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-features:
-  - |
-    A new option --ceph-vip for the "openstack overcloud ceph deploy" command
-    has been added. This option may be used to reserve VIP(s) for each Ceph
-    service specified by the 'service/network' mapping defined as input.
-    For instance, a generic ceph service mapping can be something like the
-    following::
-
-      ---
-      ceph_services:
-        - service: ceph_nfs
-          network: storage_cloud_0
-        - service: ceph_rgw
-          network: storage_cloud_0
-
-    For each service added to the list above, a virtual IP on the specified
-    network is created to be used as frontend_vip of the ingress daemon. When
-    no subnet is specified, a default `<network>_subnet` pattern is used. If
-    the subnet does not follow the `<network>_subnet` pattern, a subnet for
-    the VIP may be specified per service::
-
-      ---
-      ceph_services:
-        - service: ceph_nfs
-          network: storage_cloud_0
-        - service: ceph_rgw
-          network: storage_cloud_0
-          subnet: storage_leafX
-
-    When the `subnet` parameter is provided, it will be used by the ansible
-    module, otherwise the default pattern is followed. This feature also
-    supports the fixed_ips mode. When fixed_ip(s) are defined, the module is
-    able to use that input to reserve the VIP on that network. A valid input
-    can be something like the following::
-
-      ---
-      fixed: true
-      ceph_services:
-        - service: ceph_nfs
-          network: storage_cloud_0
-          ip_address: 172.16.11.159
-        - service: ceph_rgw
-          network: storage_cloud_0
-          ip_address: 172.16.11.160
-
-    When the boolean fixed is set to True, the subnet pattern is ignored, and
-    a sanity check on the user input is performed, looking for the ip_address
-    keys associated to the specified services.
-    If the `fixed` keyword is missing, the subnet pattern is followed.
diff --git a/releasenotes/notes/check-undercloud-disk-space-709ebf574e91ae3a.yaml b/releasenotes/notes/check-undercloud-disk-space-709ebf574e91ae3a.yaml
deleted file mode 100644
index 625415c82..000000000
--- a/releasenotes/notes/check-undercloud-disk-space-709ebf574e91ae3a.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Enable new preflight check on the undercloud, using ansible playbooks from
-    openstack-tripleo-validations.
-  - Check runs differently if we're on a brand new deploy or an upgrade, as we
-    don't need the same amount of free space.
diff --git a/releasenotes/notes/cleanup-docker_opts-65aa01111417cadb.yaml b/releasenotes/notes/cleanup-docker_opts-65aa01111417cadb.yaml
deleted file mode 100644
index c2b397c69..000000000
--- a/releasenotes/notes/cleanup-docker_opts-65aa01111417cadb.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-upgrade:
-  - |
-    The following two options have been removed.
-
-    - ``docker_registry_mirror``
-    - ``docker_insecure_registries``
diff --git a/releasenotes/notes/config-download-default-dir-changed-58a4b756b80050d0.yaml b/releasenotes/notes/config-download-default-dir-changed-58a4b756b80050d0.yaml
deleted file mode 100644
index f3aec6a08..000000000
--- a/releasenotes/notes/config-download-default-dir-changed-58a4b756b80050d0.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - The default location for ``openstack overcloud config download`` has
-    changed to ``~/tripleo-config``. ``config download`` also no longer
-    uses tmpdirs and will overwrite files on subsequent runs to the same
-    ``--config-dir`` location.
diff --git a/releasenotes/notes/config-download-default-to-true-1423abc46b294938.yaml b/releasenotes/notes/config-download-default-to-true-1423abc46b294938.yaml
deleted file mode 100644
index 1e63223e2..000000000
--- a/releasenotes/notes/config-download-default-to-true-1423abc46b294938.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Using --config-download is now the default. A new CLI argument,
-    --no-config-download (or --stack-only), can be used to disable the
-    config-download workflow.
diff --git a/releasenotes/notes/config-download-in-progress-4af02517cb5a5c0c.yaml b/releasenotes/notes/config-download-in-progress-4af02517cb5a5c0c.yaml
deleted file mode 100644
index ec39aaccc..000000000
--- a/releasenotes/notes/config-download-in-progress-4af02517cb5a5c0c.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - The client (tripleoclient) now raises a new exception,
-    ConfigDownloadInProgress, if there is already an instance of the
-    tripleo.deployment.v1.config_download_deploy workflow in progress for the
-    current stack.
diff --git a/releasenotes/notes/config-download-only-d82ae32e13595f93.yaml b/releasenotes/notes/config-download-only-d82ae32e13595f93.yaml
deleted file mode 100644
index a1e64a47c..000000000
--- a/releasenotes/notes/config-download-only-d82ae32e13595f93.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - A new CLI argument, --config-download-only, has been added which can be
-    used to skip the stack create/update and only run the config-download
-    workflow to apply the software configuration.
diff --git a/releasenotes/notes/config-download-setup-only-1423abc46b294939.yaml b/releasenotes/notes/config-download-setup-only-1423abc46b294939.yaml
deleted file mode 100644
index e2870d5e3..000000000
--- a/releasenotes/notes/config-download-setup-only-1423abc46b294939.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - The --setup-only option has been added to facilitate the generation
-    of ansible content and ensure the environment is accessible via the
-    current stack.
diff --git a/releasenotes/notes/config-download-timeout-82ab8914f998631f.yaml b/releasenotes/notes/config-download-timeout-82ab8914f998631f.yaml
deleted file mode 100644
index bfbd98fc7..000000000
--- a/releasenotes/notes/config-download-timeout-82ab8914f998631f.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - The timeout specified with --timeout will now be honored with
-    config-download. An additional cli arg, --config-download-timeout, is also
-    added that can be used to specify a specific timeout (in minutes) just for
-    the config-download part of the deployment.
diff --git a/releasenotes/notes/config-download-verbosity-75fa34c110c00657.yaml b/releasenotes/notes/config-download-verbosity-75fa34c110c00657.yaml
deleted file mode 100644
index a3978b80f..000000000
--- a/releasenotes/notes/config-download-verbosity-75fa34c110c00657.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - The verbosity of the config-download ansible tasks for deployment is now
-    controlled by the verbosity level specified on the command line.
diff --git a/releasenotes/notes/config_download-5ba7f496b2186b68.yaml b/releasenotes/notes/config_download-5ba7f496b2186b68.yaml
deleted file mode 100644
index 45cb7aae5..000000000
--- a/releasenotes/notes/config_download-5ba7f496b2186b68.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    A new command "openstack overcloud config download" has been added which
-    enables download of the configuration data used by TripleO for debugging
-    or running outside the control of the overcloud heat stack. Note that
-    running configuration tools outside of the heat stack is experimental at
-    this stage and should be used with caution.
diff --git a/releasenotes/notes/consistent-deploy-dir-b16c43c4f94c03ca.yaml b/releasenotes/notes/consistent-deploy-dir-b16c43c4f94c03ca.yaml
deleted file mode 100644
index cf10d0513..000000000
--- a/releasenotes/notes/consistent-deploy-dir-b16c43c4f94c03ca.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - A consistent working directory is now used by the openstack overcloud
-    deploy command. The directory contains input files, generated files, and
-    log files, among others. The directory defaults to
-    ~/overcloud-deploy/<stack>. The old default config-download directory
-    at ~/config-download/<stack> is now a symlink to the new location at
-    ~/overcloud-deploy/<stack>/config-download/<stack> for backwards
-    compatibility.
diff --git a/releasenotes/notes/container-build-exclude-option-4c4d3899f7a2649c.yaml b/releasenotes/notes/container-build-exclude-option-4c4d3899f7a2649c.yaml
deleted file mode 100644
index e51a6f25c..000000000
--- a/releasenotes/notes/container-build-exclude-option-4c4d3899f7a2649c.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Added new `exclude` option to the container build command that allows the
-    user to skip building a specific container. This option can be specified
-    multiple times to skip building multiple containers.
diff --git a/releasenotes/notes/container-image-commands-49eaa9c338085844.yaml b/releasenotes/notes/container-image-commands-49eaa9c338085844.yaml
deleted file mode 100644
index a408abe69..000000000
--- a/releasenotes/notes/container-image-commands-49eaa9c338085844.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - |
-    New command ``overcloud container image upload``. This is the container
-    image equivalent to the ``overcloud image upload`` command.
-  - |
-    New command ``overcloud container image build``. This command receives the
-    same ``--config-file`` argument as ``overcloud container image upload`` and
-    invokes kolla-build to build the images specified in that file.
diff --git a/releasenotes/notes/container_cli-803238248de9c60a.yaml b/releasenotes/notes/container_cli-803238248de9c60a.yaml
deleted file mode 100644
index 73d936f46..000000000
--- a/releasenotes/notes/container_cli-803238248de9c60a.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - |
-    The ``container_cli`` parameter no longer accepts ``docker``. Now only
-    ``podman`` is accepted as a valid value.
diff --git a/releasenotes/notes/container_work_dir-edb40007cb25168e.yaml b/releasenotes/notes/container_work_dir-edb40007cb25168e.yaml
deleted file mode 100644
index 64602d14a..000000000
--- a/releasenotes/notes/container_work_dir-edb40007cb25168e.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    Add --work-dir to the openstack overcloud container image build command;
-    every run will create a unique workspace where Kolla configs and build
-    logs will be stored. The default directory is
-    /tmp/container-builds. UUIDs are used to identify each run of the
UUIDs are used to identify each time we run the - command and will be the directory name in the work dir. diff --git a/releasenotes/notes/ctlplane-undercloud-conf-host-routes-7084bf696020c39e.yaml b/releasenotes/notes/ctlplane-undercloud-conf-host-routes-7084bf696020c39e.yaml deleted file mode 100644 index 53f8785a1..000000000 --- a/releasenotes/notes/ctlplane-undercloud-conf-host-routes-7084bf696020c39e.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - | - A new option ``host_routes`` is now available for subnet definitions in - ``undercloud.conf``. - - - Host routes specified for the *local_subnet* will be added to - the routing table on the Undercloud. - - Host routes for all subnets are passed to tripleo-heat-templates so that - the *host_routes* property of the ctlplane subnets is set accordingly - when installing the Undercloud. diff --git a/releasenotes/notes/default-kolla-conf-9b5aaab1931f00a0.yaml b/releasenotes/notes/default-kolla-conf-9b5aaab1931f00a0.yaml deleted file mode 100644 index 124808236..000000000 --- a/releasenotes/notes/default-kolla-conf-9b5aaab1931f00a0.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a default kolla conf file for the container image building command. - diff --git a/releasenotes/notes/default_ironic_http_boot-4b1d3769635b829f.yaml b/releasenotes/notes/default_ironic_http_boot-4b1d3769635b829f.yaml deleted file mode 100644 index b12aef8df..000000000 --- a/releasenotes/notes/default_ironic_http_boot-4b1d3769635b829f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - The default value of `--http-boot` changed from `/httpboot` to - `/var/lib/ironic/httpboot` as containerized Ironic services - expect. diff --git a/releasenotes/notes/deploy-overcloud-pre-provisioned-9d55ca9bda6c8a84.yaml b/releasenotes/notes/deploy-overcloud-pre-provisioned-9d55ca9bda6c8a84.yaml deleted file mode 100644 index 9ce3c4720..000000000 --- a/releasenotes/notes/deploy-overcloud-pre-provisioned-9d55ca9bda6c8a84.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Add the `--deployed-server` flag that can only be used with the - `--disable-validations` flag. When specified, it allows deploying an - overcloud on pre-provisioned nodes and ignores missing nova - and ironic UC services. diff --git a/releasenotes/notes/deploy-undercloud-with-heat-789655d324b2727b.yaml b/releasenotes/notes/deploy-undercloud-with-heat-789655d324b2727b.yaml deleted file mode 100644 index d10eb7fb2..000000000 --- a/releasenotes/notes/deploy-undercloud-with-heat-789655d324b2727b.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - EXPERIMENTAL feature to install the undercloud with openstack heat, - adding support for a containerized undercloud. It checks for prerequisites - and installs missing packages, then deploys the undercloud with heat-all. - - - | - New flag `--install-kolla`, defaults to False. Adds or removes Kolla - packages to/from the list of required packages to be installed by the - ``openstack undercloud deploy`` command. Set it to True if you want - to build Kolla containers on the undercloud node as part of your - continuous deployment pipeline.
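As a minimal undercloud.conf sketch of the ``host_routes`` subnet option described above (the subnet section name and all addresses are hypothetical, and the JSON-style list format is an assumption)::

    [ctlplane-subnet]
    cidr = 192.168.24.0/24
    host_routes = [{"destination": "10.10.10.0/24", "nexthop": "192.168.24.1"}]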
diff --git a/releasenotes/notes/deployed_server_default-0c2267c7588056fc.yaml b/releasenotes/notes/deployed_server_default-0c2267c7588056fc.yaml deleted file mode 100644 index b3bf52ed0..000000000 --- a/releasenotes/notes/deployed_server_default-0c2267c7588056fc.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - Ephemeral heat is now used as the default for overcloud deployment - and assumes the nodes are pre-provisioned using metalsmith. Deprecates - the existing ``--deployed-server`` option and adds an additional option - ``--provision-nodes`` for using installed heat and provisioning - nodes with heat. diff --git a/releasenotes/notes/deployment-rhel-registration-cli-removal-cf36ac9fb4d81a04.yaml b/releasenotes/notes/deployment-rhel-registration-cli-removal-cf36ac9fb4d81a04.yaml deleted file mode 100644 index 17f591ec6..000000000 --- a/releasenotes/notes/deployment-rhel-registration-cli-removal-cf36ac9fb4d81a04.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - The rhel registration options that were part of the `openstack overcloud deploy` - command have been removed. Please use the `RhsmVars` option in an environment file - to deploy. See https://docs.openstack.org/tripleo-docs/latest/install/advanced_deployment/rhsm.html diff --git a/releasenotes/notes/deployment_user-bc451a45754035b9.yaml b/releasenotes/notes/deployment_user-bc451a45754035b9.yaml deleted file mode 100644 index 87126b65e..000000000 --- a/releasenotes/notes/deployment_user-bc451a45754035b9.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Introduce the deployment_user parameter, defaulting to the current user. - It will feed the DeploymentUser parameter in THT and is primarily used to add the - user to the 'docker' group, so our operators can run the `overcloud - container` commands when the undercloud is containerized. diff --git a/releasenotes/notes/deprecate-baremetal-commands-d24279b6a7cf97d6.yaml b/releasenotes/notes/deprecate-baremetal-commands-d24279b6a7cf97d6.yaml deleted file mode 100644 index 58e4d8673..000000000 --- a/releasenotes/notes/deprecate-baremetal-commands-d24279b6a7cf97d6.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -deprecations: - - The ``openstack baremetal import``, ``openstack baremetal - introspect bulk start`` and ``openstack baremetal configure`` commands are - now deprecated in favour of ``openstack overcloud node import``, - ``openstack overcloud node introspect`` and ``openstack overcloud - node configure`` respectively. See `bug 1649541 - `__. diff --git a/releasenotes/notes/deprecate-docker-config-options-ebf403648b096929.yaml b/releasenotes/notes/deprecate-docker-config-options-ebf403648b096929.yaml deleted file mode 100644 index c5d16f76a..000000000 --- a/releasenotes/notes/deprecate-docker-config-options-ebf403648b096929.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -deprecations: - - | - The `docker_bip` undercloud configuration option will be removed. - | - `docker_insecure_registries` has been deprecated in favour of `container_insecure_registries` - | - `docker_registry_mirror` has been deprecated in favour of `container_registry_mirror` diff --git a/releasenotes/notes/deprecate-glance-image-upload-ef23d3078f430ccf.yaml b/releasenotes/notes/deprecate-glance-image-upload-ef23d3078f430ccf.yaml deleted file mode 100644 index 509a04b59..000000000 --- a/releasenotes/notes/deprecate-glance-image-upload-ef23d3078f430ccf.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -deprecations: - - | - The ``--local`` argument to the ``openstack overcloud image upload`` - command has been deprecated and a ``--no-local`` argument has been - added.
Earlier we used to fall back to uploading locally when glance - was not available. As glance is not installed in the undercloud by - default, we now upload images locally unless ``--no-local`` is - used. diff --git a/releasenotes/notes/deprecate-minion-83d87a36a2e74ecc.yaml b/releasenotes/notes/deprecate-minion-83d87a36a2e74ecc.yaml deleted file mode 100644 index 9e5926352..000000000 --- a/releasenotes/notes/deprecate-minion-83d87a36a2e74ecc.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - With the move to Nova-less provisioning and the removal of Neutron - network management from Heat, the undercloud minion - functionality is now deprecated. diff --git a/releasenotes/notes/deprecate-mistral-and-zaqar-aee14846fe3662d7.yaml b/releasenotes/notes/deprecate-mistral-and-zaqar-aee14846fe3662d7.yaml deleted file mode 100644 index fd95a0739..000000000 --- a/releasenotes/notes/deprecate-mistral-and-zaqar-aee14846fe3662d7.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The ``enable_mistral`` parameter and the ``enable_zaqar`` parameter in - undercloud/standalone deployment have been deprecated and will be removed - in a future release. diff --git a/releasenotes/notes/deprecate-novajoin-6493fa10ee3e09cf.yaml b/releasenotes/notes/deprecate-novajoin-6493fa10ee3e09cf.yaml deleted file mode 100644 index 2215af354..000000000 --- a/releasenotes/notes/deprecate-novajoin-6493fa10ee3e09cf.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The ``enable_novajoin`` parameter in undercloud/standalone deployment has - been deprecated. diff --git a/releasenotes/notes/deprecate-overcloud-container-prepare-45b61e76072b2736.yaml b/releasenotes/notes/deprecate-overcloud-container-prepare-45b61e76072b2736.yaml deleted file mode 100644 index 06eaa519e..000000000 --- a/releasenotes/notes/deprecate-overcloud-container-prepare-45b61e76072b2736.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -deprecations: - - | - `openstack overcloud container image prepare` has been - deprecated and replaced by `openstack tripleo container image prepare` - | - `openstack overcloud container image tag discover` has been - deprecated and replaced by `openstack tripleo container image prepare` diff --git a/releasenotes/notes/deprecate-overcloud-profiles-0bc0a368775844ad.yaml b/releasenotes/notes/deprecate-overcloud-profiles-0bc0a368775844ad.yaml deleted file mode 100644 index 11b368f69..000000000 --- a/releasenotes/notes/deprecate-overcloud-profiles-0bc0a368775844ad.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -deprecations: - - | - The commands ``openstack overcloud profiles list`` and ``openstack - overcloud profiles match`` have been deprecated for removal. Since the - Compute service is no longer used on the undercloud, flavor-based - scheduling is no longer used. - diff --git a/releasenotes/notes/deprecate-pull-source-1d904126040c2eb1.yaml b/releasenotes/notes/deprecate-pull-source-1d904126040c2eb1.yaml deleted file mode 100644 index 19c6a9746..000000000 --- a/releasenotes/notes/deprecate-pull-source-1d904126040c2eb1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: -- The command "openstack overcloud container image prepare" has deprecated - the --pull-source argument. The source registry should now be specified as - part of the --namespace argument.
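As a hedged sketch of specifying the source registry as part of ``--namespace`` (the registry host, namespace, and output file name are hypothetical)::

    openstack overcloud container image prepare \
      --namespace 192.168.24.1:8787/tripleomaster \
      --output-env-file ~/containers-env.yaml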
diff --git a/releasenotes/notes/deprecate-stack-action-c3912259bfc9f509.yaml b/releasenotes/notes/deprecate-stack-action-c3912259bfc9f509.yaml deleted file mode 100644 index 81b1a0782..000000000 --- a/releasenotes/notes/deprecate-stack-action-c3912259bfc9f509.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The `force-stack-update` and `force_stack_create` CLI arguments - for undercloud/standalone deploy have been deprecated and - are now irrelevant. diff --git a/releasenotes/notes/deprecate-standalone-argument-57660f3023dc3220.yaml b/releasenotes/notes/deprecate-standalone-argument-57660f3023dc3220.yaml deleted file mode 100644 index 03466ff4e..000000000 --- a/releasenotes/notes/deprecate-standalone-argument-57660f3023dc3220.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - | - The --standalone argument to the openstack tripleo deploy command is now - deprecated. The argument previously had no effect other than allowing the - tripleo deploy command to not throw an exception, so it can be safely - deprecated with no replacement. diff --git a/releasenotes/notes/deprecate-static-inventory-option-4342b01aebaa4af8.yaml b/releasenotes/notes/deprecate-static-inventory-option-4342b01aebaa4af8.yaml deleted file mode 100644 index 7aa1cba4c..000000000 --- a/releasenotes/notes/deprecate-static-inventory-option-4342b01aebaa4af8.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The --static-inventory argument to the openstack update/upgrade commands - has been deprecated and is now ignored. The inventory generated in - work_dir by update/upgrade prepare is used instead. diff --git a/releasenotes/notes/deprecate-tripleo-deploy-plan-environment-file-baeebec064bfd44a.yaml b/releasenotes/notes/deprecate-tripleo-deploy-plan-environment-file-baeebec064bfd44a.yaml deleted file mode 100644 index 0844c2ef5..000000000 --- a/releasenotes/notes/deprecate-tripleo-deploy-plan-environment-file-baeebec064bfd44a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The --plan-environment-file option of the openstack tripleo deploy command - is now deprecated and not used. diff --git a/releasenotes/notes/deprecate-tripleo-ui-12328976b429426f.yaml b/releasenotes/notes/deprecate-tripleo-ui-12328976b429426f.yaml deleted file mode 100644 index 3541ee380..000000000 --- a/releasenotes/notes/deprecate-tripleo-ui-12328976b429426f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - | - enable_ui is deprecated for removal in Train. diff --git a/releasenotes/notes/deprecate-uc-scheduler_max_attempts-9018c4635507940b.yaml b/releasenotes/notes/deprecate-uc-scheduler_max_attempts-9018c4635507940b.yaml deleted file mode 100644 index 9e5c74cdb..000000000 --- a/releasenotes/notes/deprecate-uc-scheduler_max_attempts-9018c4635507940b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The ``[DEFAULT] scheduler_max_attempts`` option of undercloud.conf has been - deprecated. The option has had no effect since nova was removed from - the undercloud. diff --git a/releasenotes/notes/deprecate-undercloud-deploy-b3da98146e0c962d.yaml b/releasenotes/notes/deprecate-undercloud-deploy-b3da98146e0c962d.yaml deleted file mode 100644 index ea955836e..000000000 --- a/releasenotes/notes/deprecate-undercloud-deploy-b3da98146e0c962d.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - | - Create the tripleo deploy action to be used as an interface to a standalone - installer. -deprecations: - - | - The undercloud deploy action has been deprecated.
The tripleo deploy action - with the --standalone option should be used instead. - diff --git a/releasenotes/notes/deprecate_instack-e6ffdc2bb886a3c8.yaml b/releasenotes/notes/deprecate_instack-e6ffdc2bb886a3c8.yaml deleted file mode 100644 index 141b0661b..000000000 --- a/releasenotes/notes/deprecate_instack-e6ffdc2bb886a3c8.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - instack-undercloud is deprecated in the Rocky cycle and is replaced by - the containerized undercloud efforts in python-tripleoclient. diff --git a/releasenotes/notes/deprecated-network-data-v1-f1490ba769f6aa7a.yaml b/releasenotes/notes/deprecated-network-data-v1-f1490ba769f6aa7a.yaml deleted file mode 100644 index 246832391..000000000 --- a/releasenotes/notes/deprecated-network-data-v1-f1490ba769f6aa7a.yaml +++ /dev/null @@ -1,54 +0,0 @@ ---- -features: - - | - Neutron resources for overcloud deployments on the undercloud are now - managed with tooling external to the Heat stack. - - Networks and subnet resources as well as Virtual IPs for an overcloud - can be managed either using separate commands or the all-in-one - ``overcloud deploy``, by using the new network data YAML definition as - the ``--networks-file`` and ``--vip-file`` arguments with this command. - - Overcloud node network ports are now managed by the baremetal node - provisioning workflow. Baremetal nodes, and the network resources, can - be provisioned using the separate ``overcloud node provision`` command. - Alternatively, the all-in-one ``overcloud deploy`` command will run the - baremetal node provisioning steps if the ``--baremetal-deployment`` - argument is used. - - Please refer to the `Networking Version 2 (Two) - `_ - documentation page for more details. -upgrade: - - | - The network data definition used with the overcloud deployment must be - updated to version 2. The undercloud upgrade will execute the command - (``openstack overcloud network extract``) to generate the network data - definition for each deployed overcloud. The undercloud upgrade will save - the new file in the working directory for each stack, which defaults to - ``overcloud-deploy/<STACK_NAME>/tripleo-<STACK_NAME>-network-data.yaml``. - | - A new YAML definition file for overcloud stack virtual IPs must be used. - The undercloud upgrade will execute the command (``overcloud network vip - extract``) to generate this file for each deployed overcloud. The - undercloud upgrade will save the new file in the working directory for - each stack, which defaults to - ``overcloud-deploy/<STACK_NAME>/tripleo-<STACK_NAME>-virtual-ips.yaml``. - - The baremetal node definition has been extended to support neutron port - resource management; this requires additional input in the YAML definition - used for baremetal node provisioning. The undercloud upgrade will execute - the command (``overcloud node extract provisioned``) to generate this file - for each deployed overcloud. The undercloud upgrade will save the new file - in the working directory for each stack, which defaults to - ``overcloud-deploy/<STACK_NAME>/tripleo-<STACK_NAME>-baremetal-deployment.yaml``. -deprecations: - - | - To enable management of neutron resources external to Heat, the network data - YAML definition schema has been updated. The previous schema version has been - deprecated; using the deprecated v1 schema is only possible when using the - deprecated non-ephemeral Heat on the Undercloud. - | - Managing neutron resources for composable networks is now enabled by default when - provisioning baremetal nodes.
The option ``--network-ports`` has been deprecated, - and is a NOOP until it is fully removed. - diff --git a/releasenotes/notes/dhcp_start_stop_optional_for_remote_subnets-805b7d2ed7ed0863.yaml b/releasenotes/notes/dhcp_start_stop_optional_for_remote_subnets-805b7d2ed7ed0863.yaml deleted file mode 100644 index 355bd902d..000000000 --- a/releasenotes/notes/dhcp_start_stop_optional_for_remote_subnets-805b7d2ed7ed0863.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -features: - - | - The ``dhcp_start`` and ``dhcp_end`` options are now optional for subnet - definitions in the Undercloud configuration (``undercloud.conf``). - The allocation_pools are calculated by removing the ``local_ip``, - ``gateway``, ``undercloud_admin_host``, ``undercloud_public_host`` and - ``inspection_iprange`` from the subnet's full IP range. Allocation pools for - all remaining ranges will be configured. Additionally, the new option - ``dhcp_exclude`` can be used to exclude additional IP addresses and/or - IP address ranges, for example to exclude ``172.20.0.105`` and the range - ``172.20.0.210-172.20.0.219``:: - - dhcp_exclude = 172.20.0.105,172.20.0.210-172.20.0.219 - - * When ``dhcp_start`` is defined, any addresses prior to this address are - also removed from the allocation pools. - * When ``dhcp_end`` is defined, any addresses after this address are also - removed from the allocation pools. - - .. Note:: If the default cidr (``192.168.24.0/24``) is used for the local - subnet, the ``dhcp_start`` and ``dhcp_end`` cannot simply be - removed to utilize the full address space of the subnet. This is due - to the default values of ``dhcp_start`` and ``dhcp_end``. - | - It is now possible to configure non-contiguous allocation pools for the - Undercloud ctlplane subnets. The ``dhcp_start`` and ``dhcp_end`` options - have been extended to allow a list of start and end address pairs. For - example, to create allocation pools ``172.20.0.100-172.20.0.150`` and - ``172.20.0.200-172.20.0.250``:: - - dhcp_start = 172.20.0.100,172.20.0.200 - dhcp_end = 172.20.0.150,172.20.0.250 diff --git a/releasenotes/notes/disable-keystone-by-default-ee9cb13ee2e37f9f.yaml b/releasenotes/notes/disable-keystone-by-default-ee9cb13ee2e37f9f.yaml deleted file mode 100644 index 8532fa2c0..000000000 --- a/releasenotes/notes/disable-keystone-by-default-ee9cb13ee2e37f9f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - The Undercloud is now deployed without keystone and deploys standalone - openstack services with http_basic authentication. A new option - 'enable_keystone' has been added to enable keystone in the undercloud - if required. -upgrade: - - The keystone service is no longer deployed by default in the undercloud. diff --git a/releasenotes/notes/disable-password-generation-84a8be5686a8cf2e.yaml b/releasenotes/notes/disable-password-generation-84a8be5686a8cf2e.yaml deleted file mode 100644 index d899b4e01..000000000 --- a/releasenotes/notes/disable-password-generation-84a8be5686a8cf2e.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - The ``openstack overcloud deploy`` and ``openstack overcloud plan - create`` commands now have a ``--disable-password-generation`` - argument, to allow operators to disable password generation and - only use passwords provided by them instead. By default, password - generation remains enabled.
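As a brief sketch of the password-generation flag described above (the custom passwords environment file name is hypothetical)::

    openstack overcloud deploy --templates \
      --disable-password-generation -e ~/my-passwords.yaml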
diff --git a/releasenotes/notes/disable-password-generation-uc-cced193be3d1aa86.yaml b/releasenotes/notes/disable-password-generation-uc-cced193be3d1aa86.yaml deleted file mode 100644 index dc886d573..000000000 --- a/releasenotes/notes/disable-password-generation-uc-cced193be3d1aa86.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - | - Instack undercloud legacy passwords should be customized via their original - ``undercloud-passwords.conf`` location when upgrading from an instack UC. - Those passwords will be automatically transitioned to - ``tripleo-undercloud-passwords.yaml`` during upgrade. Changes made to the - legacy location are ignored after that. Use - ``tripleo-undercloud-passwords.yaml`` to manually update UC passwords - from then on. diff --git a/releasenotes/notes/discover-tag-3b0b073a95178e82.yaml b/releasenotes/notes/discover-tag-3b0b073a95178e82.yaml deleted file mode 100644 index 28461dec3..000000000 --- a/releasenotes/notes/discover-tag-3b0b073a95178e82.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - The "openstack overcloud container image tag discover" command is provided - to discover the version-based tag by inspecting the image from a stable tag - like 'current-tripleo-rdo'. Stable tags like 'latest' or 'pike' can't be used - for container updates because something needs to change to trigger the new - containers being pulled. Without this command it would be up to the user to - find out what versioned tag to specify when calling prepare. diff --git a/releasenotes/notes/drop-ceph-ansible-playbook-param-87e3d0582ab640ba.yaml b/releasenotes/notes/drop-ceph-ansible-playbook-param-87e3d0582ab640ba.yaml deleted file mode 100644 index 3b1a6c044..000000000 --- a/releasenotes/notes/drop-ceph-ansible-playbook-param-87e3d0582ab640ba.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - | - The `--ceph-ansible-playbook` parameter has been removed from all - update- and upgrade-related commands. The parameter is not - necessary anymore, as the right playbook should be selected - automatically (but for cases when control is needed, overriding - the CephAnsiblePlaybook parameter via an environment file will still take - priority). Furthermore, the `--ceph-ansible-playbook` CLI - parameter was attempting to override the detection logic which - selects the desired ceph-ansible playbook. diff --git a/releasenotes/notes/drop-ceph-upgrade-run-8f28475bf6b0af65.yaml b/releasenotes/notes/drop-ceph-upgrade-run-8f28475bf6b0af65.yaml deleted file mode 100644 index fd13ef9e5..000000000 --- a/releasenotes/notes/drop-ceph-upgrade-run-8f28475bf6b0af65.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - | - The `openstack overcloud ceph-upgrade run` command no longer works - in Rocky due to internal changes to TripleO (more direct execution - of Ansible). The command has been removed from the CLI. Ceph - update/upgrade in Rocky is meant to be performed via `openstack - overcloud external-update run` and `openstack overcloud - external-upgrade run` commands, respectively. diff --git a/releasenotes/notes/drop_mistral_support_for_the_validations_CLI-081bc5cd6e7db056.yaml b/releasenotes/notes/drop_mistral_support_for_the_validations_CLI-081bc5cd6e7db056.yaml deleted file mode 100644 index fe7fe0757..000000000 --- a/releasenotes/notes/drop_mistral_support_for_the_validations_CLI-081bc5cd6e7db056.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The TripleO Validator CLI doesn't support Mistral anymore for listing and - running the Validations. Ansible is now the way to run them.
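As a hedged sketch of the filesystem-based validator workflow that replaces Mistral here (the validation name is illustrative, and the exact ``run`` flags may vary by release)::

    openstack tripleo validator list
    openstack tripleo validator run --validation check-ram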
diff --git a/releasenotes/notes/ephemeral-heat-by-default-4ed05e8b82f41c4b.yaml b/releasenotes/notes/ephemeral-heat-by-default-4ed05e8b82f41c4b.yaml deleted file mode 100644 index 25637c52d..000000000 --- a/releasenotes/notes/ephemeral-heat-by-default-4ed05e8b82f41c4b.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - An ephemeral Heat process is now used by default for overcloud deployment. - On each overcloud management operation (deploy/update/upgrade), a - containerized Heat process will be started, the stack will be created anew, - and then the Heat process will be stopped. The enable_heat option in - undercloud.conf now defaults to False. -deprecations: - - Setting enable_heat=True in undercloud.conf is deprecated. - - Using --heat-type=installed is deprecated with the openstack overcloud commands. diff --git a/releasenotes/notes/ephemeral-heat-minor-update-db24c2f190c1c8a7.yaml b/releasenotes/notes/ephemeral-heat-minor-update-db24c2f190c1c8a7.yaml deleted file mode 100644 index 1d6775542..000000000 --- a/releasenotes/notes/ephemeral-heat-minor-update-db24c2f190c1c8a7.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - During a minor update of the overcloud, it was previously necessary to - execute 3 steps: - - Update Prepare - - Update Run - - Update Converge - Starting in W, it is no longer necessary to perform a stack update - during the converge. This change removes the stack update from the - converge step. Now, we will just run the deploy_steps_playbook instead. \ No newline at end of file diff --git a/releasenotes/notes/external-update-upgrade-8c354d66d8ad5ecc.yaml b/releasenotes/notes/external-update-upgrade-8c354d66d8ad5ecc.yaml deleted file mode 100644 index 86ee90343..000000000 --- a/releasenotes/notes/external-update-upgrade-8c354d66d8ad5ecc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - | - New `openstack overcloud external-update run` and `openstack - overcloud external-upgrade run` commands are defined. These are - meant to perform updates and upgrades for services deployed via - external_deploy_tasks. A separate command is used because external - installers don't fit well within the --nodes and --roles selection - pattern we've established for the normal `update run` and `upgrade - run` commands. diff --git a/releasenotes/notes/fail-on-wrong-env-dir-b1d0530052002e57.yaml b/releasenotes/notes/fail-on-wrong-env-dir-b1d0530052002e57.yaml deleted file mode 100644 index f5b38f48e..000000000 --- a/releasenotes/notes/fail-on-wrong-env-dir-b1d0530052002e57.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - ``overcloud deploy`` now fails early if a path specified for - ``--environment-directory`` does not exist. See `bug 1697031 - `__. diff --git a/releasenotes/notes/fix-container-undercloud-validations-admin-host-only-against-local-subnet-5e98a220e01e6c19.yaml b/releasenotes/notes/fix-container-undercloud-validations-admin-host-only-against-local-subnet-5e98a220e01e6c19.yaml deleted file mode 100644 index ef4c535cf..000000000 --- a/releasenotes/notes/fix-container-undercloud-validations-admin-host-only-against-local-subnet-5e98a220e01e6c19.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixes a validation issue where validation would fail when multiple ctlplane - subnets were defined in ``undercloud.conf``. - `Bug: 1791088 `_.
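As a short sketch of the external update/upgrade commands introduced above (the ``ceph`` tag is an assumed example of an external deploy task)::

    openstack overcloud external-update run --tags ceph
    openstack overcloud external-upgrade run --tags ceph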
diff --git a/releasenotes/notes/fix-dhcpv6-stateless-inspector-dnsmasq-range-9d0f4635ecce3d2f.yaml b/releasenotes/notes/fix-dhcpv6-stateless-inspector-dnsmasq-range-9d0f4635ecce3d2f.yaml deleted file mode 100644 index 7b3a83a96..000000000 --- a/releasenotes/notes/fix-dhcpv6-stateless-inspector-dnsmasq-range-9d0f4635ecce3d2f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - Fixed an issue where the DHCP server for ironic-inspector was configured to - operate in DHCPv6-stateful mode when the undercloud configuration specified - DHCPv6-stateless mode. (See bug: `1853334 - `_) - diff --git a/releasenotes/notes/fix-fencing-action-parameter-8321d25a23d8ef99.yaml b/releasenotes/notes/fix-fencing-action-parameter-8321d25a23d8ef99.yaml deleted file mode 100644 index ff33a976b..000000000 --- a/releasenotes/notes/fix-fencing-action-parameter-8321d25a23d8ef99.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - The `overcloud generate fencing` command no longer carries a default value - for the `action` parameter. Advice from upstream Pacemaker is that this - value should no longer be passed to fencing agents as it may cause - inconsistent behaviour. The parameter remains, but is now optional, and - its use is discouraged. diff --git a/releasenotes/notes/fix-pass-mtu-option-inspector-dnsmasq-7c2f9d7d358ace07.yaml b/releasenotes/notes/fix-pass-mtu-option-inspector-dnsmasq-7c2f9d7d358ace07.yaml deleted file mode 100644 index 5150348eb..000000000 --- a/releasenotes/notes/fix-pass-mtu-option-inspector-dnsmasq-7c2f9d7d358ace07.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - The MTU setting was not configured for the Ironic Inspector DHCP (dnsmasq) - service. This caused inspection to fail when operating on a network with - < 1500 bytes MTU. See bug: `1845487 - `_. - diff --git a/releasenotes/notes/get_rid_of_mistral_for_listing_validations-1e9dedf33675f1d6.yaml b/releasenotes/notes/get_rid_of_mistral_for_listing_validations-1e9dedf33675f1d6.yaml deleted file mode 100644 index 52d484072..000000000 --- a/releasenotes/notes/get_rid_of_mistral_for_listing_validations-1e9dedf33675f1d6.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The TripleO Validator was using Mistral to get all the Validations available - on the Undercloud. From now on, the CLI parses the Validations directly - from the filesystem, and the Mistral support has been removed. diff --git a/releasenotes/notes/git-support-for-plans-883d622d2275ba3b.yaml b/releasenotes/notes/git-support-for-plans-883d622d2275ba3b.yaml deleted file mode 100644 index b3d750f7f..000000000 --- a/releasenotes/notes/git-support-for-plans-883d622d2275ba3b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Added a --source_url argument to the overcloud create_plan workflow call. - The --source_url argument expects the URL of a git repository containing - the Heat templates to deploy. diff --git a/releasenotes/notes/handle-failed-actions-cac0abd02ed67a51.yaml b/releasenotes/notes/handle-failed-actions-cac0abd02ed67a51.yaml deleted file mode 100644 index 7765f889c..000000000 --- a/releasenotes/notes/handle-failed-actions-cac0abd02ed67a51.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The result of Mistral actions will now be checked, and if they've failed, - an exception will be raised.
See - https://bugs.launchpad.net/tripleo/+bug/1686811 diff --git a/releasenotes/notes/handle-no-deployment-status-a8f73f887f0f158f.yaml b/releasenotes/notes/handle-no-deployment-status-a8f73f887f0f158f.yaml deleted file mode 100644 index 6bb3544a1..000000000 --- a/releasenotes/notes/handle-no-deployment-status-a8f73f887f0f158f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - When requesting the deployment status of a non-existent plan, instead of - showing a traceback, show a helpful message indicating there is no status. diff --git a/releasenotes/notes/healthcheck_disabled-d0878072273d1496.yaml b/releasenotes/notes/healthcheck_disabled-d0878072273d1496.yaml deleted file mode 100644 index a12ae0001..000000000 --- a/releasenotes/notes/healthcheck_disabled-d0878072273d1496.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - The container healthchecks on the Undercloud can now be disabled in undercloud.conf - with the new option container_healthcheck_disabled, false by default. diff --git a/releasenotes/notes/heat_launcher_podman-80870701fe4d99a5.yaml b/releasenotes/notes/heat_launcher_podman-80870701fe4d99a5.yaml deleted file mode 100644 index 131ffe08d..000000000 --- a/releasenotes/notes/heat_launcher_podman-80870701fe4d99a5.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Switch the Heat Launcher to use Podman instead of Docker when - heat_native is disabled. diff --git a/releasenotes/notes/image-build-labels-97fda64f693cd8ba.yaml b/releasenotes/notes/image-build-labels-97fda64f693cd8ba.yaml deleted file mode 100644 index 666cf45e4..000000000 --- a/releasenotes/notes/image-build-labels-97fda64f693cd8ba.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - The container image build command now has the ability to inject labels - into various images being constructed. To add labels into a container, - the argument `--label` can be specified multiple times. The value is - always a key=value pair and each key must be unique. -other: - - The container image build label argument has the ability to do simple - string replacements following the python standard. Available options - for string replacement are `registry`, `namespace`, `prefix`, - `image`, `tag`, and `name`. Example usage - `--label component="%(prefix)s-%(name)s-container"`. diff --git a/releasenotes/notes/image-vendor-packages-a0d667e0c7aa1bbd.yaml b/releasenotes/notes/image-vendor-packages-a0d667e0c7aa1bbd.yaml deleted file mode 100644 index 8d1dc0deb..000000000 --- a/releasenotes/notes/image-vendor-packages-a0d667e0c7aa1bbd.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - | - Some packages previously built into the default overcloud-full image to - support vendor integration have been removed due to difficulties related - to CI stability. To add these packages for the vendor support you require - you can either build a custom image containing the packages (see - http://docs.openstack.org/developer/tripleo-docs/basic_deployment/basic_deployment_cli.html#get-images) - or you can use virt-customize (http://libguestfs.org/virt-customize.1.html) - to install the required extra packages in a pre-built overcloud-full image. diff --git a/releasenotes/notes/inclusive_language-8d44129de1e8099a.yaml b/releasenotes/notes/inclusive_language-8d44129de1e8099a.yaml deleted file mode 100644 index 22853c6b4..000000000 --- a/releasenotes/notes/inclusive_language-8d44129de1e8099a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - Replace non-inclusive blacklist and whitelist with - exclude and include.
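As a hedged sketch of injecting labels during an image build (the config file path and label values are illustrative, not prescribed)::

    openstack overcloud container image build \
      --config-file overcloud_containers.yaml \
      --label component="%(prefix)s-%(name)s-container" \
      --label vendor=example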
diff --git a/releasenotes/notes/inflight-validation-option-9e3f70b5bcb8dea9.yaml b/releasenotes/notes/inflight-validation-option-9e3f70b5bcb8dea9.yaml deleted file mode 100644 index 8dcadaf19..000000000 --- a/releasenotes/notes/inflight-validation-option-9e3f70b5bcb8dea9.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Introduce a new "--inflight-validations" option in order to activate - in-flight validations. It defaults to "False", since we don't want them by - default. diff --git a/releasenotes/notes/install-packages-for-overcloud-images-e3eacb9e0ec53b5b.yaml b/releasenotes/notes/install-packages-for-overcloud-images-e3eacb9e0ec53b5b.yaml deleted file mode 100644 index e46d1409e..000000000 --- a/releasenotes/notes/install-packages-for-overcloud-images-e3eacb9e0ec53b5b.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - A dnf installation of tripleo-common, tripleo-ironic-python-agent-builder, - openstack-tripleo-image-elements, openstack-tripleo-puppet-elements, and - xfsprogs has been included as part of the `openstack overcloud image build` - command. This allows us to drop these as always required dependencies for - tripleoclient. diff --git a/releasenotes/notes/invoke-plan-env-workflows-05421b1ee8be4733.yaml b/releasenotes/notes/invoke-plan-env-workflows-05421b1ee8be4733.yaml deleted file mode 100644 index f71dcca1a..000000000 --- a/releasenotes/notes/invoke-plan-env-workflows-05421b1ee8be4733.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Workflows, associated with plan-environment.yaml using - workflow_parameters, are invoked before the actual - deployment starts. diff --git a/releasenotes/notes/ipmi-discovery-aaee9fb7082ffac4.yaml b/releasenotes/notes/ipmi-discovery-aaee9fb7082ffac4.yaml deleted file mode 100644 index eeae265e4..000000000 --- a/releasenotes/notes/ipmi-discovery-aaee9fb7082ffac4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add new command ``openstack overcloud node discover`` for node discovery - by probing a range of IP addresses for accessible BMCs. diff --git a/releasenotes/notes/ipmi-lanplus-default-fb3ec3496853ed9e.yaml b/releasenotes/notes/ipmi-lanplus-default-fb3ec3496853ed9e.yaml deleted file mode 100644 index 1dee5317f..000000000 --- a/releasenotes/notes/ipmi-lanplus-default-fb3ec3496853ed9e.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Deprecate --ipmi-lanplus for the ``openstack overcloud generate fencing`` - command since it is now the default, and add a new option --ipmi-no-lanplus - to override it. diff --git a/releasenotes/notes/ipv6-address-mode-option-c85242d337d9e0d9.yaml b/releasenotes/notes/ipv6-address-mode-option-c85242d337d9e0d9.yaml deleted file mode 100644 index b87ae93b3..000000000 --- a/releasenotes/notes/ipv6-address-mode-option-c85242d337d9e0d9.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - The IPv6 addressing mode is now configurable for the undercloud - provisioning network. The option ``ipv6_address_mode`` (default: - ``dhcpv6-stateless``) in undercloud.conf is used to control the addressing - mode. Possible values: - - - **dhcpv6-stateless**: Address configuration using RA and optional - information using DHCPv6. - - **dhcpv6-stateful**: Address configuration and optional information - using DHCPv6.
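As a minimal undercloud.conf sketch for the addressing-mode option described above::

    [DEFAULT]
    ipv6_address_mode = dhcpv6-stateful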
diff --git a/releasenotes/notes/make-ssh-enablement-timeouts-configurable-326124c81ce56fca.yaml b/releasenotes/notes/make-ssh-enablement-timeouts-configurable-326124c81ce56fca.yaml deleted file mode 100644 index af5d8db0d..000000000 --- a/releasenotes/notes/make-ssh-enablement-timeouts-configurable-326124c81ce56fca.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - | - Added `--overcloud-ssh-enable-timeout` to allow end users to increase the - wait time during the deploy, ffu, upgrade and admin actions. By default - this is 600 seconds. - | - Added `--overcloud-ssh-port-timeout` to allow end users to increase the - time we wait for ssh to become ready on the hosts during the deploy, ffu, - upgrade and admin actions. On older hardware or slow booting hardware, the - 300 seconds we wait by default for the port to come up may not be sufficient. diff --git a/releasenotes/notes/modify-os-image-parameter-004b1cc81df2b88e.yaml b/releasenotes/notes/modify-os-image-parameter-004b1cc81df2b88e.yaml deleted file mode 100644 index fc5ce833e..000000000 --- a/releasenotes/notes/modify-os-image-parameter-004b1cc81df2b88e.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added a new os-image-name parameter that replaces - the os-image one, because it was conflicting with - glance parameter naming, causing os-image to - not work properly. diff --git a/releasenotes/notes/move-to-undeploy-plan-067e6070b5f24fa3.yaml b/releasenotes/notes/move-to-undeploy-plan-067e6070b5f24fa3.yaml deleted file mode 100644 index 796e198f7..000000000 --- a/releasenotes/notes/move-to-undeploy-plan-067e6070b5f24fa3.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - openstack overcloud delete PLAN_NAME now undeploys the plan to maintain - the correct status internally and deletes the stack, instead of deleting - both the stack and the plan. This is a backwards incompatible change - because we are no longer deleting the plan as was done previously. diff --git a/releasenotes/notes/networks_data_option-6c613c0d118ccfc8.yaml b/releasenotes/notes/networks_data_option-6c613c0d118ccfc8.yaml deleted file mode 100644 index cac856525..000000000 --- a/releasenotes/notes/networks_data_option-6c613c0d118ccfc8.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - A new -n/--networks-data option has been added. This allows overriding the - default network_data.yaml, similar to the existing interface for - roles_data.yaml. diff --git a/releasenotes/notes/new-playbook-runner-interface-219d00b486ee2d7a.yaml b/releasenotes/notes/new-playbook-runner-interface-219d00b486ee2d7a.yaml deleted file mode 100644 index b4129568d..000000000 --- a/releasenotes/notes/new-playbook-runner-interface-219d00b486ee2d7a.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -features: - - | - A new interface has been created allowing deployers to run arbitrary - playbooks which are defined within a deployment plan. This interface is - being created to replace the existing Mistral interface, which is largely - used for HCI and NFV use cases. The interface will now process playbooks - when they're defined within a plan under the `playbook_parameters` key. - - Playbook entries can be defined with or without a base path. If no base - path is defined within the entry, the interface will fall back to the - constant tripleo playbook path, `/usr/share/ansible/tripleo-playbooks`. - Options defined within a playbook entry will be passed into the playbook at - runtime using extra-vars. - - * Interface usage example - - .. 
code-block:: yaml - - playbook_parameters: - sample-playbook-0.yaml: - x: 1 - y: a - /path/to/sample-playbook-1.yaml: - x: a - y: 1 diff --git a/releasenotes/notes/no-cisco-bdddc17abb5f2847.yaml b/releasenotes/notes/no-cisco-bdddc17abb5f2847.yaml deleted file mode 100644 index ef95b88da..000000000 --- a/releasenotes/notes/no-cisco-bdddc17abb5f2847.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Support for the ``cisco-ucs-managed`` and ``cisco-ucs-standalone`` - hardware types has been removed since these hardware types have been - removed from Ironic due to lack of maintenance. diff --git a/releasenotes/notes/node-unprovision-dd400e58f2f479a2.yaml b/releasenotes/notes/node-unprovision-dd400e58f2f479a2.yaml deleted file mode 100644 index b976b5279..000000000 --- a/releasenotes/notes/node-unprovision-dd400e58f2f479a2.yaml +++ /dev/null @@ -1,10 +0,0 @@ -features: - - | - New command "openstack overcloud node unprovision" - - This is a companion command to "openstack overcloud node provision" - which is required for scale-down and should be run after "openstack - overcloud deploy". - - It will undeploy any instance which has provisioned=False in the - supplied roles yaml. \ No newline at end of file diff --git a/releasenotes/notes/noop-fencing-action-parameter-6ca390e9ebf43796.yaml b/releasenotes/notes/noop-fencing-action-parameter-6ca390e9ebf43796.yaml deleted file mode 100644 index 740e1ef4f..000000000 --- a/releasenotes/notes/noop-fencing-action-parameter-6ca390e9ebf43796.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - | - The ``action`` parameter for overcloud fencing generation is now ignored. - This is because recent versions of the underlying fencing agents now - produce an error if the action parameter is used. Previously the use of the - parameter was discouraged. diff --git a/releasenotes/notes/oc-deploy-force-pre-provisioned-b50159643a31b3d2.yaml b/releasenotes/notes/oc-deploy-force-pre-provisioned-b50159643a31b3d2.yaml deleted file mode 100644 index 053b32aae..000000000 --- a/releasenotes/notes/oc-deploy-force-pre-provisioned-b50159643a31b3d2.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Overcloud deployment no longer supports deploying overcloud nodes by Heat. - Because of this change, the ``openstack overcloud deploy`` command now - always fails if the ``--provisioned-nodes`` option is used. diff --git a/releasenotes/notes/openstack-overcloud-export-293c8f0f6ab13e91.yaml b/releasenotes/notes/openstack-overcloud-export-293c8f0f6ab13e91.yaml deleted file mode 100644 index b48264977..000000000 --- a/releasenotes/notes/openstack-overcloud-export-293c8f0f6ab13e91.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - A new command "openstack overcloud export" is added. The command is used to - export the data from a control stack for use in a compute stack for the - multi-stack feature. diff --git a/releasenotes/notes/openstack-overcloud-export-ceph-f36421e1685db302.yaml b/releasenotes/notes/openstack-overcloud-export-ceph-f36421e1685db302.yaml deleted file mode 100644 index c4bea2377..000000000 --- a/releasenotes/notes/openstack-overcloud-export-ceph-f36421e1685db302.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - A new command "openstack overcloud export ceph" is added. The command is - used to export the Ceph deployment data from one stack for use in another - stack with storage services which use that Ceph cluster when using the - multi-stack deployment feature.
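As a hedged sketch of the export commands above (the stack name and output file names are hypothetical, and the exact flags may vary by release)::

    openstack overcloud export --stack overcloud --output-file overcloud-export.yaml
    openstack overcloud export ceph --stack overcloud --output-file overcloud-ceph-export.yaml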
diff --git a/releasenotes/notes/openstack-undercloud-backup-b0c83afeb565c41d.yaml b/releasenotes/notes/openstack-undercloud-backup-b0c83afeb565c41d.yaml deleted file mode 100644 index 0e245cfd4..000000000 --- a/releasenotes/notes/openstack-undercloud-backup-b0c83afeb565c41d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Add a new option to the TripleO client in order to create an Undercloud - backup. - Usage: openstack undercloud backup [--add-path ADD_FILES_TO_BACKUP] diff --git a/releasenotes/notes/option-ironic-network-interfaces-7e185f5fd67c7500.yaml b/releasenotes/notes/option-ironic-network-interfaces-7e185f5fd67c7500.yaml deleted file mode 100644 index a53c605ef..000000000 --- a/releasenotes/notes/option-ironic-network-interfaces-7e185f5fd67c7500.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - The network interface drivers for the Baremetal service on the undercloud - are now configurable. New undercloud.conf options - ``enabled_network_interfaces`` (Default: ``flat``) and - ``default_network_interface`` (Default: ``flat``) control the enabled - network interfaces and the default network interface when enrolling nodes. diff --git a/releasenotes/notes/os-cloud-config-b2acae54b4f0baf2.yaml b/releasenotes/notes/os-cloud-config-b2acae54b4f0baf2.yaml deleted file mode 100644 index 600a2b25c..000000000 --- a/releasenotes/notes/os-cloud-config-b2acae54b4f0baf2.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - Remove all usage of os-cloud-config. - Indeed, os-cloud-config has been deprecated in Ocata and is no longer useful - to tripleoclient. Let's stop using it and remove all the code that used it, - including the tests. diff --git a/releasenotes/notes/overcloud-cell-export-4a51243ab002935a.yaml b/releasenotes/notes/overcloud-cell-export-4a51243ab002935a.yaml deleted file mode 100644 index b37b4718e..000000000 --- a/releasenotes/notes/overcloud-cell-export-4a51243ab002935a.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - | - [1] provides the steps on how to set up multiple cells using tripleo. - This requires extracting deployment information from the overcloud - /control plane stack, which is then used as input for the cell deployment. - - With this patch we provide a new tripleoclient functionality which helps - to automate the export steps from [1]: - * Export the default cell EndpointMap - * Export the default cell HostsEntry - * Export AllNodesConfig and GlobalConfig information - * Export passwords - - [1] https://docs.openstack.org/tripleo-docs/latest/install/advanced_deployment/deploy_cellv2.html#deploy-an-additional-nova-cell-v2 diff --git a/releasenotes/notes/overcloud-credentials-command-f9e8d7439fee02d5.yaml b/releasenotes/notes/overcloud-credentials-command-f9e8d7439fee02d5.yaml deleted file mode 100644 index 9079770d8..000000000 --- a/releasenotes/notes/overcloud-credentials-command-f9e8d7439fee02d5.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - | - A new command, "openstack overcloud credentials" has been added to create - the overcloudrc and overcloudrc.v3 files for your deployment. This is - particularly useful if the deploy is started from the GUI. -issues: - - | - Fixed an issue with the permissions of the overcloudrc.v3 file. The chmod - call was not being used on it correctly.
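As a brief sketch of the credentials command above (the plan name ``overcloud`` is the usual default; the ``--directory`` flag is an assumption and may vary by release)::

    openstack overcloud credentials overcloud --directory ~/overcloud-creds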
diff --git a/releasenotes/notes/overcloud-delete-59fea2cd43cc9dd5.yaml b/releasenotes/notes/overcloud-delete-59fea2cd43cc9dd5.yaml deleted file mode 100644 index b544a4d97..000000000 --- a/releasenotes/notes/overcloud-delete-59fea2cd43cc9dd5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - Fixes `bug 1657461 - `__ so the - overcloud stack is actually deleted. This calls the newly - created stack delete workflow. diff --git a/releasenotes/notes/overcloud-deploy-history-4a54b53ac10e6542.yaml b/releasenotes/notes/overcloud-deploy-history-4a54b53ac10e6542.yaml deleted file mode 100644 index 91c9cb935..000000000 --- a/releasenotes/notes/overcloud-deploy-history-4a54b53ac10e6542.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - Each deploy command run by a user is now tracked in a history file under - the $HOME/.tripleo directory. It allows retrieving all the previous calls to - the openstack overcloud deploy command, the environment files used and the - template directories used, for analysis, debugging or tracking. See - https://bugs.launchpad.net/tripleo/+bug/1673700 diff --git a/releasenotes/notes/overcloud-export-passwords-bfa698491601a734.yaml b/releasenotes/notes/overcloud-export-passwords-bfa698491601a734.yaml deleted file mode 100644 index 94542f473..000000000 --- a/releasenotes/notes/overcloud-export-passwords-bfa698491601a734.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - openstack overcloud export now exports user defined password values instead - of just always exporting the generated password values. diff --git a/releasenotes/notes/overcloud-failures-0e98b37251fb2be2.yaml b/releasenotes/notes/overcloud-failures-0e98b37251fb2be2.yaml deleted file mode 100644 index 9bc0c39eb..000000000 --- a/releasenotes/notes/overcloud-failures-0e98b37251fb2be2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new command, openstack overcloud failures, is added to show - the failures from a deployment plan when using config-download. diff --git a/releasenotes/notes/overcloud-node-bios-c9ae89e35a96c7b1.yaml b/releasenotes/notes/overcloud-node-bios-c9ae89e35a96c7b1.yaml deleted file mode 100644 index 11c2d4c9c..000000000 --- a/releasenotes/notes/overcloud-node-bios-c9ae89e35a96c7b1.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -features: - - | - Adds new commands to run BIOS cleaning on nodes:: - - openstack overcloud node bios configure \ - --configuration <..> [--all-manageable|uuid1,uuid2,..] - - openstack overcloud node bios reset \ - [--all-manageable|uuid1,uuid2,..] - - The first command configures given BIOS settings on given nodes or all - manageable nodes; the second command resets BIOS settings to factory - default on given nodes or all manageable nodes. diff --git a/releasenotes/notes/overcloud-node-clean-981790791a0d0246.yaml b/releasenotes/notes/overcloud-node-clean-981790791a0d0246.yaml deleted file mode 100644 index c48e9e0ae..000000000 --- a/releasenotes/notes/overcloud-node-clean-981790791a0d0246.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds new command to run metadata cleaning on nodes:: - - openstack overcloud node clean [--all-manageable|uuid1,uuid2,..]
diff --git a/releasenotes/notes/overcloud-profiles-compute-0e12922b3db70285.yaml b/releasenotes/notes/overcloud-profiles-compute-0e12922b3db70285.yaml deleted file mode 100644 index 583af001c..000000000 --- a/releasenotes/notes/overcloud-profiles-compute-0e12922b3db70285.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - Add ``--all`` argument to the ``overcloud profiles list`` command to - also display nodes that cannot be deployed on. A new ``Error`` column - is displayed when this argument is provided. -fixes: - - | - Exclude from the output of ``overcloud profiles list`` nodes that: - - * have error power state - * do not have a matching hypervisor request - * have their compute service down. diff --git a/releasenotes/notes/overcloud-status-69d3cc931f50930e.yaml b/releasenotes/notes/overcloud-status-69d3cc931f50930e.yaml deleted file mode 100644 index 21a5177a5..000000000 --- a/releasenotes/notes/overcloud-status-69d3cc931f50930e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new command, openstack overcloud status, is added to show - the status of a deployment plan when using config-download. diff --git a/releasenotes/notes/overcloud_ceph_deploy-485f59b64eb93c70.yaml b/releasenotes/notes/overcloud_ceph_deploy-485f59b64eb93c70.yaml deleted file mode 100644 index e6cfd6fb8..000000000 --- a/releasenotes/notes/overcloud_ceph_deploy-485f59b64eb93c70.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - | - A new command "openstack overcloud ceph deploy" is added. The command is - used to deploy Ceph after the hardware has been provisioned with networking - and before the overcloud is deployed. The command takes the output of - "openstack overcloud node provision" as input and returns a Heat environment - file, e.g. deployed_ceph.yaml, as output. The deployed_ceph.yaml file may then - be passed to the "openstack overcloud deploy" command as input. During overcloud - deployment the Ceph cluster is then configured to host OpenStack. E.g. cephx keys - and pools are still created on the Ceph cluster by "openstack overcloud deploy". \ No newline at end of file diff --git a/releasenotes/notes/overcloud_ceph_spec-e1cfd358c4db2b22.yaml b/releasenotes/notes/overcloud_ceph_spec-e1cfd358c4db2b22.yaml deleted file mode 100644 index e2182bedb..000000000 --- a/releasenotes/notes/overcloud_ceph_spec-e1cfd358c4db2b22.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -features: - - | - New command "openstack overcloud ceph spec" has been added. This command - may be used to create a cephadm spec file as a function of the output of - metalsmith and a TripleO roles file. For example, if metalsmith output a - file with multiple hosts of differing roles and each role contained various - Ceph services, then this command could parse these files and return - input compatible with cephadm. The ceph spec file may then be passed to - "openstack overcloud ceph deploy" so that cephadm deploys only those Ceph - services on those hosts. This feature should save users from the need to - create two different files containing much of the same data and make it - easier and less error prone to include Ceph in a deployment without the - need to manually create the Ceph spec file.
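As a hedged sketch of the provision-then-Ceph-then-overcloud sequence described above (all file names are illustrative and the exact flags may vary by release)::

    openstack overcloud node provision --output ~/deployed_metal.yaml ~/baremetal_deployment.yaml
    openstack overcloud ceph deploy ~/deployed_metal.yaml --output ~/deployed_ceph.yaml
    openstack overcloud deploy --templates -e ~/deployed_metal.yaml -e ~/deployed_ceph.yaml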
diff --git a/releasenotes/notes/override-plan-env-e3df45a51bda717e.yaml b/releasenotes/notes/override-plan-env-e3df45a51bda717e.yaml deleted file mode 100644 index 2a1519203..000000000 --- a/releasenotes/notes/override-plan-env-e3df45a51bda717e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds optional override of the plan environment file with a custom - plan environment file. diff --git a/releasenotes/notes/paunch_retiring-eed2580b80dcbe74.yaml b/releasenotes/notes/paunch_retiring-eed2580b80dcbe74.yaml deleted file mode 100644 index ba7583698..000000000 --- a/releasenotes/notes/paunch_retiring-eed2580b80dcbe74.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - | - The undercloud_enable_paunch option is removed as the Paunch project is retired. diff --git a/releasenotes/notes/paunch_undercloud-2ad99f417be01355.yaml b/releasenotes/notes/paunch_undercloud-2ad99f417be01355.yaml deleted file mode 100644 index cd93822f4..000000000 --- a/releasenotes/notes/paunch_undercloud-2ad99f417be01355.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Paunch is now disabled by default when the Undercloud is deployed - and upgraded. Containers will be started by Ansible and not Paunch - anymore. One can enable Paunch again with undercloud_enable_paunch - set to True. diff --git a/releasenotes/notes/per-subnet-nameservers-d53b5cdc6d099a6a.yaml b/releasenotes/notes/per-subnet-nameservers-d53b5cdc6d099a6a.yaml deleted file mode 100644 index 18fa58624..000000000 --- a/releasenotes/notes/per-subnet-nameservers-d53b5cdc6d099a6a.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Undercloud ``ctlplane`` subnets can now have individual nameservers - (per-subnet option ``dns_nameservers``). If no subnet specific nameservers - are specified for a subnet it will fall back to ``undercloud_nameservers``. diff --git a/releasenotes/notes/plan-export-command-3fb76c91c77d7b24.yaml b/releasenotes/notes/plan-export-command-3fb76c91c77d7b24.yaml deleted file mode 100644 index 27fd2a17a..000000000 --- a/releasenotes/notes/plan-export-command-3fb76c91c77d7b24.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add a new plan export command for exporting deployment plans. diff --git a/releasenotes/notes/port-physnet-cidr-map-463a1639aba45832.yaml b/releasenotes/notes/port-physnet-cidr-map-463a1639aba45832.yaml deleted file mode 100644 index 77886d7de..000000000 --- a/releasenotes/notes/port-physnet-cidr-map-463a1639aba45832.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - The undercloud install now configures ironic inspector to automatically - populate the physical_network field on baremetal provisioning ports. See - bug `1870529 `_. diff --git a/releasenotes/notes/post_upgrade_tasks_undercloud_standalone-d9914f6b52c237ce.yaml b/releasenotes/notes/post_upgrade_tasks_undercloud_standalone-d9914f6b52c237ce.yaml deleted file mode 100644 index 4a6dddb21..000000000 --- a/releasenotes/notes/post_upgrade_tasks_undercloud_standalone-d9914f6b52c237ce.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - This changes the upgrade workflow for the standalone and undercloud - where we now run the post_upgrade_tasks after the deployment. - So the order is upgrade_tasks, deployment steps (docker/puppet), - then post_upgrade_tasks, which is the same order as the overcloud. - It will allow us to execute some specific post upgrade tasks on standalone - and undercloud, like removing Docker containers when upgrading to Podman.
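As a minimal undercloud.conf sketch of the per-subnet ``dns_nameservers`` option noted above (the subnet section name and resolver addresses are hypothetical)::

    [ctlplane-subnet]
    dns_nameservers = 192.168.24.253,192.168.24.254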
diff --git a/releasenotes/notes/prepare-environment-directory-aa86ad3935aec192.yaml b/releasenotes/notes/prepare-environment-directory-aa86ad3935aec192.yaml deleted file mode 100644 index 7dd743045..000000000 --- a/releasenotes/notes/prepare-environment-directory-aa86ad3935aec192.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add the '--environment-directory' option to the `openstack overcloud container - image prepare` command. diff --git a/releasenotes/notes/prepare-include-86d96ff1d7bdc44d.yaml b/releasenotes/notes/prepare-include-86d96ff1d7bdc44d.yaml deleted file mode 100644 index b101ffa54..000000000 --- a/releasenotes/notes/prepare-include-86d96ff1d7bdc44d.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The `openstack overcloud container image prepare` command now has - an `--include` argument which will filter entries if they do not match any - of the include expressions. diff --git a/releasenotes/notes/prepare-service-4281d7358be7450a.yaml b/releasenotes/notes/prepare-service-4281d7358be7450a.yaml deleted file mode 100644 index 00ec48643..000000000 --- a/releasenotes/notes/prepare-service-4281d7358be7450a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - The "openstack overcloud container image prepare" command can now filter - the image list by the containerized services being deployed. This is done by - specifying the heat environment files which enable containerized services. diff --git a/releasenotes/notes/queens-upgrade-cli-add-roles-nodes.yaml-6ac6ecda01286006.yaml b/releasenotes/notes/queens-upgrade-cli-add-roles-nodes.yaml-6ac6ecda01286006.yaml deleted file mode 100644 index 30bb74a69..000000000 --- a/releasenotes/notes/queens-upgrade-cli-add-roles-nodes.yaml-6ac6ecda01286006.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -upgrade: - - | - This adds the new --roles and --nodes parameters for the Queens major - upgrade CLI, specifically for the 'openstack overcloud upgrade run' which - executes the ansible playbooks on overcloud nodes. - - openstack overcloud upgrade run --nodes compute-0 compute-1 - openstack overcloud upgrade run --roles Controller - - Nodes for controlplane roles (the default 'Controller' role for example) - need to be upgraded using the --roles parameter as these nodes must be - upgraded together/in parallel. - - For non-controlplane roles the --nodes parameter can be used to limit the - upgrade run to one or more nodes as specified by the operator. diff --git a/releasenotes/notes/refactor-only-cli-args-cb70ed8ba8b166a9.yaml b/releasenotes/notes/refactor-only-cli-args-cb70ed8ba8b166a9.yaml deleted file mode 100644 index f5c98bc8c..000000000 --- a/releasenotes/notes/refactor-only-cli-args-cb70ed8ba8b166a9.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - | - The CLI arguments that control which parts of the deployment to execute - have been refactored to better align with the user's expected intention: - --stack-only: create the stack, download the config. No overcloud node - changes. - --setup-only: ssh admin authorization setup. - --config-download-only: run config-download playbook(s) to configure the - overcloud.
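The three phase arguments from the note above are meant to be run in sequence; a minimal sketch, in which the templates path and environment file are illustrative:

    # 1. Create the stack and download the config; no overcloud node changes.
    openstack overcloud deploy --templates -e my-env.yaml --stack-only
    # 2. Set up ssh admin authorization on the overcloud nodes.
    openstack overcloud deploy --templates -e my-env.yaml --setup-only
    # 3. Run the config-download playbook(s) to configure the overcloud.
    openstack overcloud deploy --templates -e my-env.yaml --config-download-only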
diff --git a/releasenotes/notes/refuse-to-label-an-md-device-for-root-device-8ad0c1e85292ca0a.yaml b/releasenotes/notes/refuse-to-label-an-md-device-for-root-device-8ad0c1e85292ca0a.yaml deleted file mode 100644 index ce90eedc6..000000000 --- a/releasenotes/notes/refuse-to-label-an-md-device-for-root-device-8ad0c1e85292ca0a.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -fixes: - - | - Fixes incorrect handling of root device hints when Software RAID is in - use with Ironic. Previously, re-introspecting a node would add an automatic - root device hint, which is incorrect and can lead to a failed deployment - because Software RAID (MD) device names can be inconsistent between the - reboot where they are configured and the one where they are utilized. - Ironic ultimately understands these devices and should choose the correct - device by default if present. We now log a warning and do not insert a - potentially incorrect root device hint. Operators using a complex set of - disks may still need to explicitly set a root device hint should their - operational state require it (see the sketch below). diff --git a/releasenotes/notes/remove-abort-9aa90d73d09507c9.yaml b/releasenotes/notes/remove-abort-9aa90d73d09507c9.yaml deleted file mode 100644 index 9e125b6f2..000000000 --- a/releasenotes/notes/remove-abort-9aa90d73d09507c9.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - The update abort command was introduced many releases ago. However, it is - not a safe operation in the context of TripleO. The TripleO Heat stack - could become irreparably damaged should a rollback be attempted. As such, - it is best to remove this functionality without a deprecation period. - The workaround for this command is to wait until the stack times out or - completes the update. \ No newline at end of file diff --git a/releasenotes/notes/remove-config-download-command-1427d6609412e3e3.yaml b/releasenotes/notes/remove-config-download-command-1427d6609412e3e3.yaml deleted file mode 100644 index c70614a57..000000000 --- a/releasenotes/notes/remove-config-download-command-1427d6609412e3e3.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - The overcloud config download command no longer works with Ephemeral Heat. - The same thing can already be done with overcloud deploy using - the --stack-only flag. As such, the overcloud config download command is being - removed. diff --git a/releasenotes/notes/remove-default-overcloud-ssh-key-7341a84480727234.yaml b/releasenotes/notes/remove-default-overcloud-ssh-key-7341a84480727234.yaml deleted file mode 100644 index 3402f1423..000000000 --- a/releasenotes/notes/remove-default-overcloud-ssh-key-7341a84480727234.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The default value for --overcloud-ssh-key was wrong (it was actually a - username). Instead of using a default value at all, just remove the default - since we can't sanely pick one. diff --git a/releasenotes/notes/remove-derived-parameters-2be57032391b52e7.yaml b/releasenotes/notes/remove-derived-parameters-2be57032391b52e7.yaml deleted file mode 100644 index 4a58cb080..000000000 --- a/releasenotes/notes/remove-derived-parameters-2be57032391b52e7.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The derive parameters ``--plan-environment-file`` option of the - ``overcloud deploy`` command has been removed.
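Where the Software RAID note above says operators may still need to set a root device hint explicitly, that is done through the node's root_device property. A minimal sketch, in which the node name and the device hint are illustrative assumptions:

    # Pin the deploy device explicitly instead of relying on an automatic hint.
    openstack baremetal node set compute-0 \
      --property root_device='{"name": "/dev/md0"}'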
diff --git a/releasenotes/notes/remove-docker_bip-20d0249b45078dc2.yaml b/releasenotes/notes/remove-docker_bip-20d0249b45078dc2.yaml deleted file mode 100644 index 8241e51fa..000000000 --- a/releasenotes/notes/remove-docker_bip-20d0249b45078dc2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - | - `docker_bip` has been removed from the undercloud.conf. diff --git a/releasenotes/notes/remove-flavor-and-count-option-from-overcloud-deploy-c5f0d0a40b013ec4.yaml b/releasenotes/notes/remove-flavor-and-count-option-from-overcloud-deploy-c5f0d0a40b013ec4.yaml deleted file mode 100644 index 4aa1a0d82..000000000 --- a/releasenotes/notes/remove-flavor-and-count-option-from-overcloud-deploy-c5f0d0a40b013ec4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - | - The ``--*-flavor`` options and the ``--*-count`` options were removed from - the ``overcloud deploy`` command. diff --git a/releasenotes/notes/remove-heat-type-installed-6c7a90af45f57231.yaml b/releasenotes/notes/remove-heat-type-installed-6c7a90af45f57231.yaml deleted file mode 100644 index 9bd868a0d..000000000 --- a/releasenotes/notes/remove-heat-type-installed-6c7a90af45f57231.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - The --heat-type argument for openstack overcloud deploy no longer accepts a - value of "installed", as using an undercloud-installed Heat to deploy the - overcloud is no longer supported. diff --git a/releasenotes/notes/remove-neutron-dhcp-agents-per-network-1f34e5deeba71cda.yaml b/releasenotes/notes/remove-neutron-dhcp-agents-per-network-1f34e5deeba71cda.yaml deleted file mode 100644 index 4d5e948a0..000000000 --- a/releasenotes/notes/remove-neutron-dhcp-agents-per-network-1f34e5deeba71cda.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Removed setting NeutronDhcpAgentsPerNetwork based on controller count. - If not overridden by the user, it should be calculated based on the - number of neutron DHCP agents that are actually deployed. diff --git a/releasenotes/notes/remove-no-workflow-b3ad07729d3768d1.yaml b/releasenotes/notes/remove-no-workflow-b3ad07729d3768d1.yaml deleted file mode 100644 index 34fba9a88..000000000 --- a/releasenotes/notes/remove-no-workflow-b3ad07729d3768d1.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - | - The ``--no-workflow`` option has been removed from the following commands. - - - ``openstack overcloud external update run`` - - ``openstack overcloud external upgrade run`` - - ``openstack overcloud update run`` diff --git a/releasenotes/notes/remove-node-delete-deprecated-switches-299df71bf6dfe450.yaml b/releasenotes/notes/remove-node-delete-deprecated-switches-299df71bf6dfe450.yaml deleted file mode 100644 index c75b3d98c..000000000 --- a/releasenotes/notes/remove-node-delete-deprecated-switches-299df71bf6dfe450.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Removed the deprecated ``-e`` and ``--templates`` switches from - `overcloud node delete`, as they were causing confusion for operators.
diff --git a/releasenotes/notes/remove-overcloud-container-commands-fe7185ee87aeda3a.yaml b/releasenotes/notes/remove-overcloud-container-commands-fe7185ee87aeda3a.yaml deleted file mode 100644 index 7b36558b7..000000000 --- a/releasenotes/notes/remove-overcloud-container-commands-fe7185ee87aeda3a.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - | - Removed the `overcloud container image upload`, `overcloud container image build`, - `overcloud container image prepare` and `overcloud container image tag` - commands, as the `tripleo container` commands replaced those in Train and - they no longer work. diff --git a/releasenotes/notes/remove-overcloud-failures-90e18749b7b8c960.yaml b/releasenotes/notes/remove-overcloud-failures-90e18749b7b8c960.yaml deleted file mode 100644 index ff96600a6..000000000 --- a/releasenotes/notes/remove-overcloud-failures-90e18749b7b8c960.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - The ``openstack overcloud failures`` command, which used the Mistral - API to get ansible errors, has been removed as part of the removal - of the Mistral service from the undercloud. diff --git a/releasenotes/notes/remove-overcloud-parameters-set-346ef5321475deea.yaml b/releasenotes/notes/remove-overcloud-parameters-set-346ef5321475deea.yaml deleted file mode 100644 index 3e65d49da..000000000 --- a/releasenotes/notes/remove-overcloud-parameters-set-346ef5321475deea.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - The ``openstack overcloud parameters set`` command has been removed. - It was used to update the plan parameters. Now that we use server-side - parameter merging and no plan, this is not relevant anymore. diff --git a/releasenotes/notes/remove-overcloud-plan-commands-69e89097fc9351fa.yaml b/releasenotes/notes/remove-overcloud-plan-commands-69e89097fc9351fa.yaml deleted file mode 100644 index 241335f2a..000000000 --- a/releasenotes/notes/remove-overcloud-plan-commands-69e89097fc9351fa.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - | - The ``openstack overcloud plan *`` commands have been removed. - These commands are irrelevant as overcloud deploy/update/upgrade - does not create/update a Swift plan anymore. Also, some of the - ``openstack overcloud role`` commands that use the Swift plan have - been removed. diff --git a/releasenotes/notes/remove-overcloud-remote-execute-c04b4f3aecddaf1c.yaml b/releasenotes/notes/remove-overcloud-remote-execute-c04b4f3aecddaf1c.yaml deleted file mode 100644 index 33021d1af..000000000 --- a/releasenotes/notes/remove-overcloud-remote-execute-c04b4f3aecddaf1c.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - | - The ``openstack overcloud remote execute`` command has been removed. - It depended on the os-collect-config service on the overcloud nodes, - which has been disabled by default since Rocky. Please use ansible - playbooks to make the necessary configuration changes in place of this - command. diff --git a/releasenotes/notes/remove-panko-deprecated-in-train-0444baa3ba4688f1.yaml b/releasenotes/notes/remove-panko-deprecated-in-train-0444baa3ba4688f1.yaml deleted file mode 100644 index 2e485152a..000000000 --- a/releasenotes/notes/remove-panko-deprecated-in-train-0444baa3ba4688f1.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - Panko, which is part of the telemetry services, has been deprecated in - Train. From now on, it is no longer deployed in the Undercloud, even if - telemetry has been enabled during the installation.
diff --git a/releasenotes/notes/remove-standalone-paramter-e7f4d978c7f6261c.yaml b/releasenotes/notes/remove-standalone-paramter-e7f4d978c7f6261c.yaml deleted file mode 100644 index be8eaf3fb..000000000 --- a/releasenotes/notes/remove-standalone-paramter-e7f4d978c7f6261c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - | - Removed the --standalone option from the `openstack tripleo deploy` command. diff --git a/releasenotes/notes/remove-tempest-container-support-f25054588ad1a860.yaml b/releasenotes/notes/remove-tempest-container-support-f25054588ad1a860.yaml deleted file mode 100644 index 535b317af..000000000 --- a/releasenotes/notes/remove-tempest-container-support-f25054588ad1a860.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -removal: - - | - Support for the Tempest container is removed in favor of the os_tempest - ansible role, as the container is no longer tested by the CI team and - not used anywhere. Since Tempest is a standalone OpenStack validation - tool, it can be installed through the rpm package, which is fully - supported. diff --git a/releasenotes/notes/remove-tripleo-ui-0176ef82f8563b92.yaml b/releasenotes/notes/remove-tripleo-ui-0176ef82f8563b92.yaml deleted file mode 100644 index 4db86a7a4..000000000 --- a/releasenotes/notes/remove-tripleo-ui-0176ef82f8563b92.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - | - `enable_ui` has been removed from the undercloud configuration options. diff --git a/releasenotes/notes/remove-undercloud-deploy-3cbbfe5f159cc71d.yaml b/releasenotes/notes/remove-undercloud-deploy-3cbbfe5f159cc71d.yaml deleted file mode 100644 index e90520f66..000000000 --- a/releasenotes/notes/remove-undercloud-deploy-3cbbfe5f159cc71d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - | - The `openstack undercloud deploy` command has been removed. Use `openstack - tripleo deploy` instead. diff --git a/releasenotes/notes/remove-undercloud_update_packages-1fa2b7a1afda3258.yaml b/releasenotes/notes/remove-undercloud_update_packages-1fa2b7a1afda3258.yaml deleted file mode 100644 index b998a6241..000000000 --- a/releasenotes/notes/remove-undercloud_update_packages-1fa2b7a1afda3258.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - | - Removed the `undercloud_update_packages` option from undercloud.conf. diff --git a/releasenotes/notes/remove-upgrade-converge-12106c08ffd1be3b.yaml b/releasenotes/notes/remove-upgrade-converge-12106c08ffd1be3b.yaml deleted file mode 100644 index 75c5dacd0..000000000 --- a/releasenotes/notes/remove-upgrade-converge-12106c08ffd1be3b.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - Remove upgrade converge. This step was required to converge - the Heat stack information with the upgraded environment. - With Ephemeral Heat, this is no longer necessary and we can - remove it to reduce upgrade steps and complexity. diff --git a/releasenotes/notes/remove-upgrade_cleanup-5b341a8a71394389.yaml b/releasenotes/notes/remove-upgrade_cleanup-5b341a8a71394389.yaml deleted file mode 100644 index 2e93a1732..000000000 --- a/releasenotes/notes/remove-upgrade_cleanup-5b341a8a71394389.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - `upgrade_cleanup` has been removed from the undercloud.conf as it hasn't - had any effect since the initial containerized undercloud back in Rocky - and the FFU upgrade in Train.
diff --git a/releasenotes/notes/remove_ffwd-upgrade_commands-579f885957f02863.yaml b/releasenotes/notes/remove_ffwd-upgrade_commands-579f885957f02863.yaml deleted file mode 100644 index 806050109..000000000 --- a/releasenotes/notes/remove_ffwd-upgrade_commands-579f885957f02863.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - | - The ffwd-upgrade command hasn't been needed since Queens. The ffwd upgrade - now relies on the overcloud upgrade commands, so there is no need to - keep the old ffwd-upgrade commands around; they could just cause confusion - for the user. diff --git a/releasenotes/notes/rename-prepare-cmd-args-f867df198d53943b.yaml b/releasenotes/notes/rename-prepare-cmd-args-f867df198d53943b.yaml deleted file mode 100644 index fc91e5d6a..000000000 --- a/releasenotes/notes/rename-prepare-cmd-args-f867df198d53943b.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -deprecations: - - | - Renamed some options of the `openstack overcloud container image prepare` - command for clarity. The '--images-file' option was renamed to - '--output-images-file'. The '--env-file' option was renamed to - '--output-env-file'. The '--service-environment-file' option was renamed - to '--environment-file'. The old options are still in place and show - a deprecation warning in the help message. diff --git a/releasenotes/notes/retire-instack-undercloud-1cd802a8cc437f7c.yaml b/releasenotes/notes/retire-instack-undercloud-1cd802a8cc437f7c.yaml deleted file mode 100644 index 4d1a7223e..000000000 --- a/releasenotes/notes/retire-instack-undercloud-1cd802a8cc437f7c.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - The --use-heat option from undercloud install has been removed along with - the ability to deploy an undercloud on baremetal using instack-undercloud - via 'openstack undercloud install'. Manual deployment may still be possible - by directly invoking the instack-undercloud scripts but it is no longer - available via the 'openstack undercloud install' command. diff --git a/releasenotes/notes/return-code-on-predeploy-failure-bd62025646e25433.yaml b/releasenotes/notes/return-code-on-predeploy-failure-bd62025646e25433.yaml deleted file mode 100644 index 8261145cf..000000000 --- a/releasenotes/notes/return-code-on-predeploy-failure-bd62025646e25433.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - `overcloud deploy` correctly returns an error code when failing - during the pre-deployment verifications (before the stack is - launched) (`bug 1672790 - `__). diff --git a/releasenotes/notes/role-management-commands-fc2f67dd5e81016e.yaml b/releasenotes/notes/role-management-commands-fc2f67dd5e81016e.yaml deleted file mode 100644 index 32c8ff896..000000000 --- a/releasenotes/notes/role-management-commands-fc2f67dd5e81016e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Added new commands for listing available example roles and generating - roles_data.yaml files for an environment. ``openstack overcloud roles list`` - provides a list of available roles shipped with tripleo-heat-templates. - ``openstack overcloud role info`` lists the details of a specific role. - ``openstack overcloud roles generate`` can be used with the available role - names to create a roles_data.yaml used by the deploy command.
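A short usage sketch of the roles commands described above; the role names are the stock ones shipped with tripleo-heat-templates:

    # List available example roles.
    openstack overcloud roles list
    # Show the details of one role.
    openstack overcloud role info Compute
    # Generate a roles_data.yaml for use with the deploy command.
    openstack overcloud roles generate -o roles_data.yaml Controller Compute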
diff --git a/releasenotes/notes/role-specific-warning-63ce21643d51236c.yaml b/releasenotes/notes/role-specific-warning-63ce21643d51236c.yaml deleted file mode 100644 index 2fea19dcb..000000000 --- a/releasenotes/notes/role-specific-warning-63ce21643d51236c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a warning message if the user has provided an invalid role-specific - parameter in the environment file. diff --git a/releasenotes/notes/roles_file-8adea5990682504d.yaml b/releasenotes/notes/roles_file-8adea5990682504d.yaml deleted file mode 100644 index 9c6896042..000000000 --- a/releasenotes/notes/roles_file-8adea5990682504d.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -other: - - | - The roles data file may be given as either an absolute path or a path - relative to the tripleo heat templates directory. This is now applicable - for all of the commands involving roles data files. diff --git a/releasenotes/notes/run-validations-using-cli-b3f4aa43e9f4eb3e.yaml b/releasenotes/notes/run-validations-using-cli-b3f4aa43e9f4eb3e.yaml deleted file mode 100644 index 0e1879331..000000000 --- a/releasenotes/notes/run-validations-using-cli-b3f4aa43e9f4eb3e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Previously, running and listing the validations was only possible by - executing the Mistral workflows associated with those tasks. - It is now possible to run and list the validations - using the TripleO CLI. - The commands added are 'openstack tripleo validator run' and - 'openstack tripleo validator list', with their corresponding parameters. diff --git a/releasenotes/notes/save-stack-outputs-61c2ad9528ae2529.yaml b/releasenotes/notes/save-stack-outputs-61c2ad9528ae2529.yaml deleted file mode 100644 index 68272ac16..000000000 --- a/releasenotes/notes/save-stack-outputs-61c2ad9528ae2529.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - Stack outputs that are needed by other functionality of the overcloud - deployment are now saved in the stack working directory in an outputs - subdirectory (default ~/overcloud-deploy//outputs). diff --git a/releasenotes/notes/scale-params-error-4fa64ae7569ab3f4.yaml b/releasenotes/notes/scale-params-error-4fa64ae7569ab3f4.yaml deleted file mode 100644 index 6b7ccdae5..000000000 --- a/releasenotes/notes/scale-params-error-4fa64ae7569ab3f4.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - | - The scale and flavor params have been deprecated since Newton. This is the - start of the process of removing these params. The CLI will now throw an - error if any of the old scale and/or flavor params are passed. This check - should be removed in a future release. diff --git a/releasenotes/notes/show-capabilities-29e4b6ebf6029ced.yaml b/releasenotes/notes/show-capabilities-29e4b6ebf6029ced.yaml deleted file mode 100644 index e8fd7e6f5..000000000 --- a/releasenotes/notes/show-capabilities-29e4b6ebf6029ced.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Command ``openstack baremetal show capabilities`` has been broken for some - time due to a conflict with ``openstack baremetal show`` from Ironic. This - command was removed. diff --git a/releasenotes/notes/simple-raid-f293d2efec3afe1c.yaml b/releasenotes/notes/simple-raid-f293d2efec3afe1c.yaml deleted file mode 100644 index 8d980b95d..000000000 --- a/releasenotes/notes/simple-raid-f293d2efec3afe1c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add new command ``openstack overcloud raid create`` for building RAID on - given nodes.
diff --git a/releasenotes/notes/skip-deploy-identifier-f7eb0d3ff5126f62.yaml b/releasenotes/notes/skip-deploy-identifier-f7eb0d3ff5126f62.yaml deleted file mode 100644 index bd34ae4db..000000000 --- a/releasenotes/notes/skip-deploy-identifier-f7eb0d3ff5126f62.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - Add a new CLI argument, --skip-deploy-identifier. The argument will disable - setting a unique value for the DeployIdentifier parameter, which means the - SoftwareDeployment resources in the templates will only be triggered if - there is an actual change to their configuration. This argument can be used - to avoid always applying configuration, such as during node scale out. - This option should be used with caution, and only if there is confidence - that the software configuration does not need to be run, such as when - scaling out certain roles (see the sketch below). diff --git a/releasenotes/notes/skip-ssh-admin-for-config-download-only-442255cc3ac73534.yaml b/releasenotes/notes/skip-ssh-admin-for-config-download-only-442255cc3ac73534.yaml deleted file mode 100644 index 54e7763e7..000000000 --- a/releasenotes/notes/skip-ssh-admin-for-config-download-only-442255cc3ac73534.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -other: - - When running with --config-download-only, the enable ssh admin workflow will - now be skipped. Skipping the ssh admin workflow saves time when trying to do the - config download workflow only. If the ssh admin workflow needs to be rerun, - the "openstack overcloud admin" command can be used. diff --git a/releasenotes/notes/sos-reporting-fc36aa73c7c5b85a.yaml b/releasenotes/notes/sos-reporting-fc36aa73c7c5b85a.yaml deleted file mode 100644 index decf8bff1..000000000 --- a/releasenotes/notes/sos-reporting-fc36aa73c7c5b85a.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - ReportExecute has been moved to the V2 client version where it will now - execute an ansible playbook when running all log collection tasks. This - playbook will run `sosreport` and collect a log archive on the undercloud - host. -deprecations: - - The log collection process will no longer store logs within swift. All - collected logs will be stored in the `--output` path as provided by the - CLI switch, using a default of **/var/lib/tripleo/support**. - - The following ReportExecute CLI switches no longer have any effect: - `--container`, `--skip-container-delete`, `--timeout`, `--concurrency`, - `--collect-only`, `--download-only`. These options have been retained - to ensure we're not breaking legacy compatibility; however, they will - be removed in a future release. diff --git a/releasenotes/notes/standalone-preflight-disabled-42719632e0b66e8f.yaml b/releasenotes/notes/standalone-preflight-disabled-42719632e0b66e8f.yaml deleted file mode 100644 index e2ce5dbdb..000000000 --- a/releasenotes/notes/standalone-preflight-disabled-42719632e0b66e8f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - The ``openstack tripleo deploy`` command no longer executes the preflight - validations by default. The ``--preflight-validation`` option should be - added to enable the validations. diff --git a/releasenotes/notes/stop-generating-overcloudrc.v3-65ea476a29cfc4bb.yaml b/releasenotes/notes/stop-generating-overcloudrc.v3-65ea476a29cfc4bb.yaml deleted file mode 100644 index 8d9c9b1f7..000000000 --- a/releasenotes/notes/stop-generating-overcloudrc.v3-65ea476a29cfc4bb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The overcloudrc.v3 file is no longer generated. The same contents are - available in overcloudrc.
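As referenced in the --skip-deploy-identifier note above, a scale-out run might look like this sketch; the environment file name is illustrative:

    # Scale out without re-triggering unchanged SoftwareDeployment resources.
    openstack overcloud deploy --templates \
      -e scale-compute.yaml --skip-deploy-identifier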
diff --git a/releasenotes/notes/stop-using-mistral-env-779df7d21b7b3a55.yaml b/releasenotes/notes/stop-using-mistral-env-779df7d21b7b3a55.yaml deleted file mode 100644 index 1757a7efe..000000000 --- a/releasenotes/notes/stop-using-mistral-env-779df7d21b7b3a55.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - | - The environment configuration for deployments is now stored in a - file called ``plan-environment.yaml`` and stored in Swift with the - templates; Mistral is no longer used to store this data. Migration - of the existing plans is handled automatically. diff --git a/releasenotes/notes/strict-config-parsing-ded8d4994c8ea363.yaml b/releasenotes/notes/strict-config-parsing-ded8d4994c8ea363.yaml deleted file mode 100644 index 48073dd1e..000000000 --- a/releasenotes/notes/strict-config-parsing-ded8d4994c8ea363.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - The `undercloud.conf` file is now strictly parsed. If there are - detected issues within the `undercloud.conf` configuration file, - operations will halt, producing an error and highlighting how - to resolve the issue. -fixes: - - The `undercloud.conf` file is now strictly parsed, which ensures a - clean configuration when deploying the undercloud. diff --git a/releasenotes/notes/support-full-disk-images-8dc84619e8517629.yaml b/releasenotes/notes/support-full-disk-images-8dc84619e8517629.yaml deleted file mode 100644 index 718ff1774..000000000 --- a/releasenotes/notes/support-full-disk-images-8dc84619e8517629.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - Allow the client to support whole disk images. The client - will now accept a --whole-disk flag on the - overcloud image upload command. When this flag is - set, it will only look for the qcow2 image, not enforcing - the upload of initrd and vmlinuz images. It will also - not set these properties on the qcow2 image in glance. - This allows Ironic to consider the uploaded image - a full disk image, giving the possibility to provide - full disk images in TripleO instead of single-partition - ones. - Please look at `Ironic documentation `_ - for reference - diff --git a/releasenotes/notes/switch-overcloud-images-to-centos8-df7cb0c197f646e4.yaml b/releasenotes/notes/switch-overcloud-images-to-centos8-df7cb0c197f646e4.yaml deleted file mode 100644 index 555a1b56e..000000000 --- a/releasenotes/notes/switch-overcloud-images-to-centos8-df7cb0c197f646e4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Starting with "Ussuri", the default images that are built when running - the `openstack overcloud image build` action are python3 and centos8 based. diff --git a/releasenotes/notes/tag-for-label-28a53e362cbce219.yaml b/releasenotes/notes/tag-for-label-28a53e362cbce219.yaml deleted file mode 100644 index 1d3d50eb9..000000000 --- a/releasenotes/notes/tag-for-label-28a53e362cbce219.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - Specifying --tag-from-label will trigger tag discovery to be performed on - every image entry during a prepare run. This allows image registries to host - images with a mixture of versioned tags, removing the need to always rebuild - all images.
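A hedged sketch of the --tag-from-label note above; the '{version}-{release}' template is a common label pattern, assumed here purely for illustration:

    # Discover each image's tag from its labels during the prepare run.
    openstack overcloud container image prepare \
      --tag-from-label '{version}-{release}'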
diff --git a/releasenotes/notes/tripleo-container-image-delete-050ab75bb6e7187d.yaml b/releasenotes/notes/tripleo-container-image-delete-050ab75bb6e7187d.yaml deleted file mode 100644 index ea70e20f1..000000000 --- a/releasenotes/notes/tripleo-container-image-delete-050ab75bb6e7187d.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - With the new podman container setup comes an Apache-served local image - registry. - - `openstack tripleo container image delete` allows you to maintain those - images, and remove those that are no longer required. - diff --git a/releasenotes/notes/tripleo-container-image-list-97d38a0e8a899d89.yaml b/releasenotes/notes/tripleo-container-image-list-97d38a0e8a899d89.yaml deleted file mode 100644 index fb51dca21..000000000 --- a/releasenotes/notes/tripleo-container-image-list-97d38a0e8a899d89.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - With the new podman container setup comes an Apache-served local image - registry. - - `openstack tripleo container image list` gives you insight into your - images (see the sketch below). diff --git a/releasenotes/notes/tripleo-container-image-push-0bff071650976f52.yaml b/releasenotes/notes/tripleo-container-image-push-0bff071650976f52.yaml deleted file mode 100644 index 39ea7d7a8..000000000 --- a/releasenotes/notes/tripleo-container-image-push-0bff071650976f52.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - With the new podman container setup comes an Apache-served local image - registry. - - `openstack tripleo container image push` allows you to maintain those - images, and add new images as required. diff --git a/releasenotes/notes/tripleo-container-image-show-af7453683ad74182.yaml b/releasenotes/notes/tripleo-container-image-show-af7453683ad74182.yaml deleted file mode 100644 index f70815e86..000000000 --- a/releasenotes/notes/tripleo-container-image-show-af7453683ad74182.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - With the new podman container setup comes an Apache-served local image - registry. - - `openstack tripleo container image show` will perform an inspection on - a given image, and present the details. diff --git a/releasenotes/notes/tripleo-deploy-experimental-7533f9a9ed18a72d.yaml b/releasenotes/notes/tripleo-deploy-experimental-7533f9a9ed18a72d.yaml deleted file mode 100644 index 84879c4c8..000000000 --- a/releasenotes/notes/tripleo-deploy-experimental-7533f9a9ed18a72d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - [EXPERIMENTAL] The `openstack tripleo deploy` command is experimental - and may change in future releases. diff --git a/releasenotes/notes/tripleo-deploy-not-experimental-be5bc0df8adecb07.yaml b/releasenotes/notes/tripleo-deploy-not-experimental-be5bc0df8adecb07.yaml deleted file mode 100644 index 9f190acf4..000000000 --- a/releasenotes/notes/tripleo-deploy-not-experimental-be5bc0df8adecb07.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - Removed experimental designation from tripleo deploy since we've used it for - several releases now and we're unlikely to change how it works. diff --git a/releasenotes/notes/tripleo-deploy-transport-ccc72043ce0eb776.yaml b/releasenotes/notes/tripleo-deploy-transport-ccc72043ce0eb776.yaml deleted file mode 100644 index 46d71e450..000000000 --- a/releasenotes/notes/tripleo-deploy-transport-ccc72043ce0eb776.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The --transport argument has been added to openstack tripleo deploy which - allows for specifying the ansible transport to use in the ansible - configuration file.
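Taken together, the local-registry notes above amount to a small maintenance workflow; a sketch, where <image-reference> stands in for a real registry image URI:

    # Inspect what the Apache-served local registry holds.
    openstack tripleo container image list
    # Show the details of a single image.
    openstack tripleo container image show <image-reference>
    # Push a new image, or delete one that is no longer required.
    openstack tripleo container image push <image-reference>
    openstack tripleo container image delete <image-reference>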
diff --git a/releasenotes/notes/tripleo-deploy-working-dir-e0cdf80a82ac256d.yaml b/releasenotes/notes/tripleo-deploy-working-dir-e0cdf80a82ac256d.yaml deleted file mode 100644 index 2f78fa068..000000000 --- a/releasenotes/notes/tripleo-deploy-working-dir-e0cdf80a82ac256d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - The "openstack tripleo deploy" and "openstack undercloud install" commands - now save their generated artifacts from the deployment under a single - consistent directory, which by default is located at - ~/tripleo-deploy/. For the undercloud, this location is - ~/tripleo-deploy/undercloud. The directory can be overridden with the - --output-dir option. diff --git a/releasenotes/notes/tripleo-launch-heat-e0067a994d63ffed.yaml b/releasenotes/notes/tripleo-launch-heat-e0067a994d63ffed.yaml deleted file mode 100644 index 67557a7d5..000000000 --- a/releasenotes/notes/tripleo-launch-heat-e0067a994d63ffed.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - A new command "openstack tripleo launch heat" is added. The command starts - up the heat-all process in the foreground, and can be used for stack - creation to generate the config-download ansible content. diff --git a/releasenotes/notes/tripleo-prepare-d57bbccb2a44e8b2.yaml b/releasenotes/notes/tripleo-prepare-d57bbccb2a44e8b2.yaml deleted file mode 100644 index 2ebaf6c3f..000000000 --- a/releasenotes/notes/tripleo-prepare-d57bbccb2a44e8b2.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -features: - - | - The new command `openstack tripleo container image prepare` will do the - same container image preparation which happens during undercloud and - overcloud deploy, but in a standalone command. The prepare operations are - driven by a heat environment file containing the parameter - `ContainerImagePrepare`. This parameter allows multiple upload and - modification operations to be specified, and the result will be a list of - image parameters to use during a tripleo deployment. - - The command `openstack tripleo container image prepare default` will - generate a `ContainerImagePrepare` with the recommended defaults to use for - `openstack tripleo container image prepare`. diff --git a/releasenotes/notes/tripleo_logfile-237209469088b8c5.yaml b/releasenotes/notes/tripleo_logfile-237209469088b8c5.yaml deleted file mode 100644 index a99f03d98..000000000 --- a/releasenotes/notes/tripleo_logfile-237209469088b8c5.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -security: - - | - Undercloud and tripleo standalone deployments support logging - into a log file. In ``undercloud.conf`` the log file path may be - defined via `undercloud_log_file`. For the standalone - deployments, use the ``--log-file`` command line argument. - - By default, undercloud pre-flight/installation/upgrade logs - will be written into ``install-undercloud.log`` in the current directory - (from where the client command is executed). diff --git a/releasenotes/notes/tripleo_validator_cli_refactor-64c298348d405347.yaml b/releasenotes/notes/tripleo_validator_cli_refactor-64c298348d405347.yaml deleted file mode 100644 index 70340acef..000000000 --- a/releasenotes/notes/tripleo_validator_cli_refactor-64c298348d405347.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -features: - - | - The TripleO Validator CLI has been revamped and new subcommands have been - created. Moreover, the CLI has been fully integrated with the native - openstack client library.
- - To list all the available validations: - - openstack tripleo validator list - To show detailed information about a validation: - - openstack tripleo validator show - To display validation parameters: - - openstack tripleo validator show parameter - To display information about the validations groups: - - openstack tripleo validator group info - To run the validations, by name or by group(s): - - openstack tripleo validator run diff --git a/releasenotes/notes/uc-unused-services-4270d05503ec2eba.yaml b/releasenotes/notes/uc-unused-services-4270d05503ec2eba.yaml deleted file mode 100644 index c56b9aebb..000000000 --- a/releasenotes/notes/uc-unused-services-4270d05503ec2eba.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -deprecations: - - | - The following ``[DEFAULT]`` options of ``undercloud.conf`` have been - deprecated and have no effect, because the corresponding services are no - longer supported in the Undercloud. - - - ``enable_cinder`` - - ``enable_keystone`` - - ``enable_swift`` - - ``enable_swift_encryption`` - - ``enable_telemetry`` diff --git a/releasenotes/notes/undercloud-auth-token-expiration-configuration-6159d733f09e1b6e.yaml b/releasenotes/notes/undercloud-auth-token-expiration-configuration-6159d733f09e1b6e.yaml deleted file mode 100644 index f5c21eefb..000000000 --- a/releasenotes/notes/undercloud-auth-token-expiration-configuration-6159d733f09e1b6e.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds `auth_token_lifetime` to undercloud.conf with a default of 14400. This - configuration option exposes the keystone token expiration as a top level - configuration since it may need to be increased to handle larger clouds. diff --git a/releasenotes/notes/undercloud-container-prepare-d272bdc30c073b29.yaml b/releasenotes/notes/undercloud-container-prepare-d272bdc30c073b29.yaml deleted file mode 100644 index 3da9900b4..000000000 --- a/releasenotes/notes/undercloud-container-prepare-d272bdc30c073b29.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - - If no undercloud.conf `container_images_file` is set then `openstack - undercloud install --use-heat` will deploy an undercloud with the latest - containers as specified by the defaults. This allows the - `container_images_file` option to be optional. diff --git a/releasenotes/notes/undercloud-containers-tls-d513bfccb117ab35.yaml b/releasenotes/notes/undercloud-containers-tls-d513bfccb117ab35.yaml deleted file mode 100644 index 971a0c621..000000000 --- a/releasenotes/notes/undercloud-containers-tls-d513bfccb117ab35.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Similar to what instack-undercloud does, the containerized undercloud can - now take user-provided certificates/keys in the bundled PEM format. This is - done through the service_certificate option and is processed by tripleoclient. diff --git a/releasenotes/notes/undercloud-deploy-tht-4af4b91774600e8f.yaml b/releasenotes/notes/undercloud-deploy-tht-4af4b91774600e8f.yaml deleted file mode 100644 index 63844c2fd..000000000 --- a/releasenotes/notes/undercloud-deploy-tht-4af4b91774600e8f.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -fixes: - - | - Fix the undercloud heat installer rendering Heat templates in - `/usr/share`, which contains t-h-t installed from the package. -features: - - | - New command line arguments `--output-dir` and `--cleanup` - define the heat templates processing rules for the undercloud: - ``undercloud deploy --cleanup --output-dir /tmp/tht``.
- - The `output_dir` and `cleanup` configuration options - for `undercloud.conf` may be used the same way and allow - configuring ``undercloud install --use-heat`` behavior (see the snippet below). -upgrade: - - | - The content of the processed heat templates will be persisted - under the given path as `$output_dir/$tempdir/templates`, for - each run of the undercloud deploy or install commands, unless - the `cleanup` mode is requested. diff --git a/releasenotes/notes/undercloud-dry-run-30264c62d6d44626.yaml b/releasenotes/notes/undercloud-dry-run-30264c62d6d44626.yaml deleted file mode 100644 index f2a6853ea..000000000 --- a/releasenotes/notes/undercloud-dry-run-30264c62d6d44626.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - The ``openstack undercloud install`` command now has a ``--dry-run`` - argument which will print the resulting install command instead of - executing it. diff --git a/releasenotes/notes/undercloud-ironic-inspector-dnsmasq-classless-static-routes-81ee8af8db74dbfe.yaml b/releasenotes/notes/undercloud-ironic-inspector-dnsmasq-classless-static-routes-81ee8af8db74dbfe.yaml deleted file mode 100644 index 59e3ad14f..000000000 --- a/releasenotes/notes/undercloud-ironic-inspector-dnsmasq-classless-static-routes-81ee8af8db74dbfe.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - The routes defined using the ``host_routes`` option for subnet definitions in - the Undercloud configuration are now also configured in ironic inspector's - dnsmasq on the Undercloud. The advanced routing options will be pushed to - the inspection ramdisk similarly to the way they are already pushed to - IPA (ironic-python-agent). diff --git a/releasenotes/notes/undercloud-minion-install-6b369d8f5f3d6a89.yaml b/releasenotes/notes/undercloud-minion-install-6b369d8f5f3d6a89.yaml deleted file mode 100644 index 482b5f386..000000000 --- a/releasenotes/notes/undercloud-minion-install-6b369d8f5f3d6a89.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds `openstack undercloud minion install` and `openstack undercloud - minion upgrade` to install or upgrade an undercloud minion that can be used - to scale heat-engine and ironic-conductor horizontally. diff --git a/releasenotes/notes/undercloud-nameservers-fbfca8af10b3097f.yaml b/releasenotes/notes/undercloud-nameservers-fbfca8af10b3097f.yaml deleted file mode 100644 index 807e1eb30..000000000 --- a/releasenotes/notes/undercloud-nameservers-fbfca8af10b3097f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - Bind ``undercloud_nameservers`` defined in `undercloud.conf` to the - ``DnsServers`` heat stack parameter. This ensures DNS configuration - is applied via os-net-config at undercloud install time as well. - That works in addition to ``UndercloudNameserver``, which is limited to the - ctlplane subnet DNS configuration executed at post-install steps only. diff --git a/releasenotes/notes/undercloud-overcloud-consistent-names-07210c5e1e82ffbc.yaml b/releasenotes/notes/undercloud-overcloud-consistent-names-07210c5e1e82ffbc.yaml deleted file mode 100644 index a63430dc0..000000000 --- a/releasenotes/notes/undercloud-overcloud-consistent-names-07210c5e1e82ffbc.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - For containerized undercloud deploy, a new option `--local_domain` - allows overriding the undercloud domain name and endpoints' FQDNs, which - default to 'undercloud'.
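A sketch combining several undercloud.conf options from the notes above; the path and the addresses are illustrative assumptions:

    # Append illustrative settings to undercloud.conf.
    cat >> undercloud.conf <<'EOF'
    [DEFAULT]
    # Persist rendered heat templates under this path instead of cleaning up.
    output_dir = /home/stack/tht-render
    cleanup = false
    # Nameservers bound to the DnsServers heat stack parameter.
    undercloud_nameservers = 203.0.113.1,203.0.113.2
    EOF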
diff --git a/releasenotes/notes/undercloud-timezone-052a6c1c05e80850.yaml b/releasenotes/notes/undercloud-timezone-052a6c1c05e80850.yaml deleted file mode 100644 index 12d41a2f3..000000000 --- a/releasenotes/notes/undercloud-timezone-052a6c1c05e80850.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - Host timezone can be managed during the undercloud installation. The default - timezone is the timezone already configured for the system. The timezone - can be configured by setting undercloud_timezone in undercloud.conf (see the snippet below). -upgrade: - - | - Host timezone can be managed during the undercloud upgrade. The default - timezone is the timezone already configured for the system. The timezone - can be configured by setting undercloud_timezone in undercloud.conf and it - is recommended to set this going forward. diff --git a/releasenotes/notes/undercloud_cleanup-e52612bfd2ff4148.yaml b/releasenotes/notes/undercloud_cleanup-e52612bfd2ff4148.yaml deleted file mode 100644 index b4c9ca42f..000000000 --- a/releasenotes/notes/undercloud_cleanup-e52612bfd2ff4148.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - The new option "upgrade_cleanup" is set to False by default but when - set to True, it will clean up the packages and configurations installed - on the undercloud after an upgrade. - This feature is experimental now and should be used for testing only. diff --git a/releasenotes/notes/undercloud_debug-764ec17aa0653def.yaml b/releasenotes/notes/undercloud_debug-764ec17aa0653def.yaml deleted file mode 100644 index c5e32b918..000000000 --- a/releasenotes/notes/undercloud_debug-764ec17aa0653def.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Verbosity is disabled by default when deploying or upgrading a containerized - undercloud, but it can be enabled with the --verbose option. diff --git a/releasenotes/notes/unused-param-warning-c5717c129adf543a.yaml b/releasenotes/notes/unused-param-warning-c5717c129adf543a.yaml deleted file mode 100644 index e105e37dc..000000000 --- a/releasenotes/notes/unused-param-warning-c5717c129adf543a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a warning message to provide the list of parameters which are - not used in a deployment plan, but are provided by the user via environments. diff --git a/releasenotes/notes/update-converge-916c7682f4d0e9f5.yaml b/releasenotes/notes/update-converge-916c7682f4d0e9f5.yaml deleted file mode 100644 index 91e987482..000000000 --- a/releasenotes/notes/update-converge-916c7682f4d0e9f5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - For minor updates, an `openstack overcloud update converge` - command has been added and must be run to restore the deployment - plan (remove no-ops of some resources) after a minor update. diff --git a/releasenotes/notes/update-in-workflow-f2f88e8daf0533d4.yaml b/releasenotes/notes/update-in-workflow-f2f88e8daf0533d4.yaml deleted file mode 100644 index 13ff5bad6..000000000 --- a/releasenotes/notes/update-in-workflow-f2f88e8daf0533d4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fixes `bug 1614928 - `__. Moves the package - update command to use a workflow.
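For the undercloud_timezone notes above, a minimal snippet; UTC is chosen purely for illustration:

    cat >> undercloud.conf <<'EOF'
    [DEFAULT]
    # Explicit host timezone; defaults to the system's configured timezone if unset.
    undercloud_timezone = UTC
    EOF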
diff --git a/releasenotes/notes/upgrade-always-use-tripleo-admin-53505e9ce380cd4c.yaml b/releasenotes/notes/upgrade-always-use-tripleo-admin-53505e9ce380cd4c.yaml deleted file mode 100644 index e5a3633e3..000000000 --- a/releasenotes/notes/upgrade-always-use-tripleo-admin-53505e9ce380cd4c.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -upgrade: - - | - Upgrades and updates now use the tripleo-admin user to connect to - the overcloud by default, which makes them work the same in this - regard as a fresh deployment. -deprecations: - - | - The `--ssh-user` parameter for the `overcloud upgrade run` command and - similar commands is now deprecated and will be removed. In the - future, the `tripleo-admin` user will always be used, which will make - it work the same as the deployment workflow. diff --git a/releasenotes/notes/upgrade_prompt-405c4f9fe3b4764c.yaml b/releasenotes/notes/upgrade_prompt-405c4f9fe3b4764c.yaml deleted file mode 100644 index 618a79c21..000000000 --- a/releasenotes/notes/upgrade_prompt-405c4f9fe3b4764c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Prompt the operator before running the upgrades and suggest performing - a backup first. The prompt can be skipped with -y/--yes. diff --git a/releasenotes/notes/upgrade_update_prompt-f6ace53f02b62fa0.yaml b/releasenotes/notes/upgrade_update_prompt-f6ace53f02b62fa0.yaml deleted file mode 100644 index 3fa17f5d8..000000000 --- a/releasenotes/notes/upgrade_update_prompt-f6ace53f02b62fa0.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - The upgrade/update commands now prompt by default for confirmation - before proceeding. This prevents an operator from running the - command accidentally and causing problems to the infrastructure. - The prompt can be skipped with the --yes/-y argument. diff --git a/releasenotes/notes/upload-cleanup-511b915c83db8a82.yaml b/releasenotes/notes/upload-cleanup-511b915c83db8a82.yaml deleted file mode 100644 index 9cd25f1a8..000000000 --- a/releasenotes/notes/upload-cleanup-511b915c83db8a82.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The commands `openstack tripleo container image prepare` and `openstack - overcloud container image upload` now have a --cleanup option to control - what local images are removed after the image upload is complete. diff --git a/releasenotes/notes/upload-only-existing-images-4c84a73dddd6c862.yaml b/releasenotes/notes/upload-only-existing-images-4c84a73dddd6c862.yaml deleted file mode 100644 index d54618bb6..000000000 --- a/releasenotes/notes/upload-only-existing-images-4c84a73dddd6c862.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Add a new option, image-type, that accepts the values 'os' and - 'ironic-python-agent'. - When specified, it restricts the upload to images of that type, making it - easier to replace ipa/os images without having to collect the full set in - the working directory (see the sketch below). diff --git a/releasenotes/notes/use_heat_default-366fe9593a72642a.yaml b/releasenotes/notes/use_heat_default-366fe9593a72642a.yaml deleted file mode 100644 index ad4c2d34f..000000000 --- a/releasenotes/notes/use_heat_default-366fe9593a72642a.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - The undercloud is now by default containerized and the deployment - is not driven by instack-undercloud anymore but by TripleO Heat Templates - like it's done for the overcloud. -upgrade: - - | - The upgrade from a non-containerized undercloud to a containerized - undercloud is supported and can be executed with ``openstack undercloud - upgrade`` command (same as before).
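Sketches of the --image-type option noted above; both invocations assume the relevant images are already staged in the working directory:

    # Replace only the ironic-python-agent images.
    openstack overcloud image upload --image-type ironic-python-agent
    # Or replace only the os image.
    openstack overcloud image upload --image-type os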
diff --git a/releasenotes/notes/use_heat_type-d532fd41490f3b1f.yaml b/releasenotes/notes/use_heat_type-d532fd41490f3b1f.yaml deleted file mode 100644 index 4b199a165..000000000 --- a/releasenotes/notes/use_heat_type-d532fd41490f3b1f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -deprecations: - - | - The --use-heat parameter is deprecated in the Rocky cycle and will be - removed in the future. - When --use-heat / --use-heat=True is set, the undercloud will be - containerized and a warning will be shown for the deprecation. - When --use-heat=False is set, the undercloud won't be containerized. diff --git a/releasenotes/notes/validation-from-deployment-97536649daa282d7.yaml b/releasenotes/notes/validation-from-deployment-97536649daa282d7.yaml deleted file mode 100644 index 92891539f..000000000 --- a/releasenotes/notes/validation-from-deployment-97536649daa282d7.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds the ability for external TripleO validations to - be called during a deployment workflow. diff --git a/releasenotes/notes/validation-from-introspection-a2c3c3b5bbe0c2fe.yaml b/releasenotes/notes/validation-from-introspection-a2c3c3b5bbe0c2fe.yaml deleted file mode 100644 index 60d6badc2..000000000 --- a/releasenotes/notes/validation-from-introspection-a2c3c3b5bbe0c2fe.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds the ability for external TripleO validations to - be called during an introspection workflow. diff --git a/releasenotes/notes/validation_logging_features-a7c096868197c42a.yaml b/releasenotes/notes/validation_logging_features-a7c096868197c42a.yaml deleted file mode 100644 index 7f81d64f9..000000000 --- a/releasenotes/notes/validation_logging_features-a7c096868197c42a.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - The TripleO Validator CLI now has a logging feature which keeps every - validation execution log on the Undercloud (/var/log/validations/). The CLI - is able to display the history and allows the user to get the full execution - details. diff --git a/releasenotes/notes/validations-in-workflows-021f93404f3a222e.yaml b/releasenotes/notes/validations-in-workflows-021f93404f3a222e.yaml deleted file mode 100644 index c2c396040..000000000 --- a/releasenotes/notes/validations-in-workflows-021f93404f3a222e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - Pre-deployment checks are now being called in a - workflow. This simplifies the client, and removes - code that does not need to be in the client. -fixes: - - Fixes `bug 1638697 - `__ Moves the - pre-deployment checks to workflows. diff --git a/releasenotes/notes/warn-deprecated-params-29197c5de2feb172.yaml b/releasenotes/notes/warn-deprecated-params-29197c5de2feb172.yaml deleted file mode 100644 index 007da8a4d..000000000 --- a/releasenotes/notes/warn-deprecated-params-29197c5de2feb172.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a warning log with the list of deprecated parameters in the plan. - diff --git a/releasenotes/notes/workflow-based-listings-6935d507c40a7e9d.yaml b/releasenotes/notes/workflow-based-listings-6935d507c40a7e9d.yaml deleted file mode 100644 index f1d2f8d7a..000000000 --- a/releasenotes/notes/workflow-based-listings-6935d507c40a7e9d.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - New ``openstack overcloud roles`` ``list`` and ``show`` commands - were added in order to look at the roles as they are defined in the - plan in the Swift container.
-deprecations: - - | - ``openstack overcloud role list`` and ``openstack overcloud role - show`` are deprecated in favour of ``openstack overcloud roles - list`` and ``openstack overcloud roles show`` respectively. The new - commands operate directly on the plan rather than on the local - filesystem. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index cde5e5ce6..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,253 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '2016, TripleO Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# The full version, including alpha/beta/rc tags. - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'python-tripleoclientReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'python-tripleoclientReleaseNotes.tex', - 'python-tripleoclient Release Notes Documentation', - '2016, TripleO Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'python-tripleoclientreleasenotes', 'python-tripleoclient Release Notes Documentation', - ['2016, TripleO Developers'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'python-tripleoclientReleaseNotes', 'python-tripleoclient Release Notes Documentation', - '2016, TripleO Developers', 'python-tripleoclientReleaseNotes', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index f27e9e960..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -============================================== -Welcome to python-tripleoclient Release Notes! -============================================== - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - wallaby - victoria - ussuri - train - stein - rocky - queens - pike - ocata - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42e..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. 
release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst deleted file mode 100644 index e43bfc0ce..000000000 --- a/releasenotes/source/pike.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Pike Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst deleted file mode 100644 index 36ac6160c..000000000 --- a/releasenotes/source/queens.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Queens Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517b7..000000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb667..000000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 583900393..000000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 2334dd5cf..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - - .. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e0c..000000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6f3..000000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b56599..000000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f04f3e4fc..000000000 --- a/requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
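The pins in the requirements list that follows (e.g. ``pbr!=2.1.0,>=2.0.0``) are PEP 440 specifier sets. A minimal sketch, assuming the third-party ``packaging`` library (not itself a dependency of this repository), of how such a set evaluates:

    # Sketch only: evaluate the pbr pin from requirements.txt below.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("!=2.1.0,>=2.0.0")
    for candidate in ("1.9.0", "2.0.0", "2.1.0", "2.4.0"):
        # SpecifierSet.contains() applies every clause in the set.
        print(candidate, spec.contains(candidate))
    # 1.9.0 False / 2.0.0 True / 2.1.0 False / 2.4.0 True
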
-pbr!=2.1.0,>=2.0.0 # Apache-2.0 - -psutil>=3.2.2 # BSD -python-ironic-inspector-client>=1.5.0 # Apache-2.0 -python-heatclient>=1.10.0 # Apache-2.0 -python-ironicclient!=2.5.2,!=2.7.1,!=3.0.0,>=2.3.0 # Apache-2.0 -python-openstackclient>=5.2.0 # Apache-2.0 -osc-lib>=2.3.0 # Apache-2.0 -tripleo-common>=16.3.0 # Apache-2.0 -cryptography>=2.1 # BSD/Apache-2.0 -ansible-runner>=2.0.0a2 # Apache 2.0 -validations-libs>=1.5.0 # Apache-2.0 -openstacksdk>=0.48.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index b9d7fc831..000000000 --- a/setup.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[metadata] -name = python-tripleoclient -summary = TripleO client -description_file = - README.rst -license = Apache License, Version 2.0 -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/python-tripleoclient/latest/ -python_requires = >=3.8 -classifier = - Environment :: Console - Environment :: OpenStack - Intended Audience :: Developers - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: OS Independent - Programming Language :: Python - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - -[files] -packages = - tripleoclient - -data_files = - share/python-tripleoclient/templates = templates/* - -[entry_points] -openstack.cli.extension = - tripleoclient = tripleoclient.plugin - -openstack.tripleoclient.v2 = - tripleo_config_generate_ansible = tripleoclient.v1.tripleo_config:GenerateAnsibleConfig - tripleo_deploy = tripleoclient.v1.tripleo_deploy:Deploy - tripleo_launch_heat = tripleoclient.v1.tripleo_launch_heat:LaunchHeat - tripleo_upgrade = tripleoclient.v1.tripleo_upgrade:Upgrade - overcloud_admin_authorize = tripleoclient.v1.overcloud_admin:Authorize - overcloud_netenv_validate = tripleoclient.v1.overcloud_netenv_validate:ValidateOvercloudNetenv - overcloud_cell_export = tripleoclient.v1.overcloud_cell:ExportCell - overcloud_ceph_deploy = tripleoclient.v2.overcloud_ceph:OvercloudCephDeploy - overcloud_ceph_spec = tripleoclient.v2.overcloud_ceph:OvercloudCephSpec - overcloud_ceph_user_disable = tripleoclient.v2.overcloud_ceph:OvercloudCephUserDisable - overcloud_ceph_user_enable = tripleoclient.v2.overcloud_ceph:OvercloudCephUserEnable - overcloud_delete = tripleoclient.v2.overcloud_delete:DeleteOvercloud - overcloud_credentials = tripleoclient.v1.overcloud_credentials:OvercloudCredentials - overcloud_deploy = tripleoclient.v1.overcloud_deploy:DeployOvercloud - overcloud_export = tripleoclient.v1.overcloud_export:ExportOvercloud - overcloud_export_ceph = tripleoclient.v1.overcloud_export_ceph:ExportOvercloudCeph - overcloud_status = tripleoclient.v1.overcloud_deploy:GetDeploymentStatus - overcloud_image_build = tripleoclient.v1.overcloud_image:BuildOvercloudImage - overcloud_image_upload = tripleoclient.v1.overcloud_image:UploadOvercloudImage - overcloud_network_extract = tripleoclient.v2.overcloud_network:OvercloudNetworkExtract - overcloud_network_provision = tripleoclient.v2.overcloud_network:OvercloudNetworkProvision - overcloud_network_vip_extract = tripleoclient.v2.overcloud_network:OvercloudVirtualIPsExtract - overcloud_network_vip_provision = tripleoclient.v2.overcloud_network:OvercloudVirtualIPsProvision - overcloud_network_unprovision = 
tripleoclient.v2.overcloud_network:OvercloudNetworkUnprovision - overcloud_node_configure = tripleoclient.v1.overcloud_node:ConfigureNode - overcloud_node_delete = tripleoclient.v1.overcloud_node:DeleteNode - overcloud_node_import = tripleoclient.v2.overcloud_node:ImportNode - overcloud_node_introspect = tripleoclient.v2.overcloud_node:IntrospectNode - overcloud_node_provide = tripleoclient.v1.overcloud_node:ProvideNode - overcloud_node_discover = tripleoclient.v1.overcloud_node:DiscoverNode - overcloud_node_clean = tripleoclient.v1.overcloud_node:CleanNode - overcloud_node_bios_configure = tripleoclient.v1.overcloud_bios:ConfigureBIOS - overcloud_node_bios_reset = tripleoclient.v1.overcloud_bios:ResetBIOS - overcloud_node_provision = tripleoclient.v2.overcloud_node:ProvisionNode - overcloud_node_unprovision = tripleoclient.v2.overcloud_node:UnprovisionNode - overcloud_node_extract_provisioned = tripleoclient.v1.overcloud_node:ExtractProvisionedNode - overcloud_profiles_match = tripleoclient.v1.overcloud_profiles:MatchProfiles - overcloud_profiles_list = tripleoclient.v1.overcloud_profiles:ListProfiles - overcloud_raid_create = tripleoclient.v1.overcloud_raid:CreateRAID - overcloud_role_show= tripleoclient.v1.overcloud_roles:RoleShow - overcloud_role_list = tripleoclient.v1.overcloud_roles:RoleList - overcloud_roles_generate = tripleoclient.v1.overcloud_roles:RolesGenerate - overcloud_support_report_collect = tripleoclient.v2.overcloud_support:ReportExecute - overcloud_update_prepare= tripleoclient.v1.overcloud_update:UpdatePrepare - overcloud_update_run = tripleoclient.v1.overcloud_update:UpdateRun - overcloud_upgrade_prepare = tripleoclient.v1.overcloud_upgrade:UpgradePrepare - overcloud_upgrade_run = tripleoclient.v1.overcloud_upgrade:UpgradeRun - overcloud_external-update_run = tripleoclient.v1.overcloud_external_update:ExternalUpdateRun - overcloud_external-upgrade_run = tripleoclient.v1.overcloud_external_upgrade:ExternalUpgradeRun - overcloud_generate_fencing = tripleoclient.v1.overcloud_parameters:GenerateFencingParameters - overcloud_backup = tripleoclient.v1.overcloud_backup:BackupOvercloud - overcloud_backup_snapshot = tripleoclient.v1.overcloud_backup:BackupSnapshot - overcloud_restore = tripleoclient.v1.overcloud_restore:RestoreOvercloud - tripleo_container_image_build = tripleoclient.v2.tripleo_container_image:Build - tripleo_container_image_hotfix = tripleoclient.v2.tripleo_container_image:HotFix - tripleo_container_image_delete = tripleoclient.v1.container_image:TripleOContainerImageDelete - tripleo_container_image_list = tripleoclient.v1.container_image:TripleOContainerImageList - tripleo_container_image_show = tripleoclient.v1.container_image:TripleOContainerImageShow - tripleo_container_image_push = tripleoclient.v1.container_image:TripleOContainerImagePush - tripleo_container_image_prepare = tripleoclient.v1.container_image:TripleOImagePrepare - tripleo_container_image_prepare_default = tripleoclient.v1.container_image:TripleOImagePrepareDefault - undercloud_install = tripleoclient.v1.undercloud:InstallUndercloud - undercloud_upgrade = tripleoclient.v1.undercloud:UpgradeUndercloud - undercloud_backup = tripleoclient.v1.undercloud_backup:BackupUndercloud - tripleo_validator_group_info = tripleoclient.v1.tripleo_validator:TripleOValidatorGroupInfo - tripleo_validator_list = tripleoclient.v1.tripleo_validator:TripleOValidatorList - tripleo_validator_run = tripleoclient.v1.tripleo_validator:TripleOValidatorRun - tripleo_validator_init = 
tripleoclient.v1.tripleo_validator:TripleOValidatorCommunityInit - tripleo_validator_show = tripleoclient.v1.tripleo_validator:TripleOValidatorShow - tripleo_validator_show_history = tripleoclient.v1.tripleo_validator:TripleOValidatorShowHistory - tripleo_validator_show_parameter = tripleoclient.v1.tripleo_validator:TripleOValidatorShowParameter - tripleo_validator_show_run = tripleoclient.v1.tripleo_validator:TripleOValidatorShowRun - -oslo.config.opts = - undercloud_config = tripleoclient.config.undercloud:list_opts - standalone_config = tripleoclient.config.standalone:list_opts - -[flake8] -show-source = True -builtins = _ -exclude = - .venv, - .git, - .tox, - dist, - doc/source/conf.py, - releasenotes - -#.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,releasenotes diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c35..000000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/templates/ephemeral-heat/heat-pod.yaml.j2 b/templates/ephemeral-heat/heat-pod.yaml.j2 deleted file mode 100644 index 2a9fc8070..000000000 --- a/templates/ephemeral-heat/heat-pod.yaml.j2 +++ /dev/null @@ -1,97 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - labels: - app: {{ heat_pod_name }} - name: {{ heat_pod_name }} - annotations: - seccomp.security.alpha.kubernetes.io/pod: localhost/seccomp_allow.json -spec: - hostNetwork: true - containers: - - command: - - heat-engine - - --config-file - - /etc/heat/heat.conf - env: - - name: PATH - value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - name: TERM - value: xterm - - name: container - value: oci - - name: LANG - value: en_US.UTF-8 - image: {{ engine_image }} - name: engine - resources: {} - securityContext: - allowPrivilegeEscalation: true - capabilities: {} - privileged: false - readOnlyRootFilesystem: false - runAsGroup: 0 - runAsUser: 0 - seLinuxOptions: {} - volumeMounts: - - mountPath: /var/log/heat - name: heat-log - - mountPath: /etc/heat/heat.conf - name: heat-config - readOnly: true - workingDir: / - - command: - - heat-api - - --config-file - - /etc/heat/heat.conf - env: - - name: PATH - value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - name: TERM - value: xterm - - name: container - value: oci - - name: LANG - value: en_US.UTF-8 - image: {{ api_image }} - name: api - resources: {} - securityContext: - allowPrivilegeEscalation: true - capabilities: {} - privileged: false - readOnlyRootFilesystem: false - runAsGroup: 0 - runAsUser: 0 - seLinuxOptions: {} - volumeMounts: - - mountPath: /var/log/heat - name: heat-log - - mountPath: /etc/heat/heat.conf - name: heat-config - readOnly: true - - mountPath: /etc/heat/api-paste.ini - name: heat-api-paste - readOnly: true - - mountPath: /token_file.json - name: heat-token-file - readOnly: true - workingDir: / - volumes: - - 
hostPath: - path: {{ heat_dir}}/log - type: Directory - name: heat-log - - hostPath: - path: {{ install_dir }}/heat.conf - type: File - name: heat-config - - hostPath: - path: {{ heat_dir }}/api-paste.ini - type: File - name: heat-api-paste - - hostPath: - path: {{ heat_dir }}/token_file.json - type: File - name: heat-token-file -status: {} diff --git a/templates/ephemeral-heat/heat.conf.j2 b/templates/ephemeral-heat/heat.conf.j2 deleted file mode 100644 index e952fea7d..000000000 --- a/templates/ephemeral-heat/heat.conf.j2 +++ /dev/null @@ -1,40 +0,0 @@ -[DEFAULT] -client_retry_limit=2 -convergence_engine = true -debug = true -default_deployment_signal_transport = HEAT_SIGNAL -deferred_auth_method = password -keystone_backend = heat.engine.clients.os.keystone.fake_keystoneclient.FakeKeystoneClient -log_dir = /var/log/heat -log_file = {{ log_file }} -max_json_body_size = 8388608 -max_nested_stack_depth = 10 -max_resources_per_stack=-1 -num_engine_workers = {{ num_engine_workers }} -rpc_response_timeout = 600 -transport_url={{ transport_url }} - -[oslo_messaging_notifications] -driver = noop - -[oslo_messaging_rabbit] -heartbeat_timeout_threshold=60 - -[noauth] -token_response = /token_file.json - -[heat_api] -bind_host = 0.0.0.0 -bind_port = {{ api_port }} -workers = 1 - -[database] -connection = {{ db_connection }} - -[paste_deploy] -api_paste_config = /etc/heat/api-paste.ini -flavor = noauth - -[yaql] -limit_iterators=10000 -memory_quota=900000 diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index f33be7013..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -coverage!=4.4,>=4.0 # Apache-2.0 -docutils>=0.11 # OSI-Approved Open Source, Public Domain -fixtures>=3.0.0 # Apache-2.0/BSD -stestr>=2.0.0 # Apache-2.0 -testtools>=2.2.0 # MIT -requests-mock>=1.2.0 # Apache-2.0 -testscenarios>=0.4 # Apache-2.0/BSD -validations-libs>=1.5.0 # Apache-2.0 diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 222901c84..000000000 --- a/tox.ini +++ /dev/null @@ -1,92 +0,0 @@ -[tox] -minversion = 3.18.0 -envlist = pep8,py - -# Automatic envs (pyXX) will only use the python version appropriate to that -# env and ignore basepython inherited from [testenv] if we set -# ignore_basepython_conflict. -ignore_basepython_conflict = True - -[testenv] -basepython = python3 -usedevelop = True -passenv = - LANG - LANGUAGE - LC_* - TERM -setenv = - HOME={envdir} - # https://github.com/pypa/pip/issues/10219#issuecomment-900898020 - LC_ALL={env:LC_ALL:en_US.UTF-8} - VIRTUAL_ENV={envdir} -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = stestr run {posargs} - -[testenv:bindep] -# Do not install any requirements. We want this to be fast and work even if -# system dependencies are missing, since it's used to tell you what system -# dependencies are missing! This also means that bindep must be installed -# separately, outside of the requirements files. 
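The heat.conf.j2 template above is plain Jinja2; the client fills in values such as api_port and transport_url before launching the ephemeral Heat pod. A minimal, illustrative sketch of that rendering, assuming the jinja2 library and made-up placeholder values rather than the client's real rendering path:

    from jinja2 import Template

    # Render a small fragment of the heat.conf.j2 template shown above;
    # the port value is a made-up placeholder, not the client's default.
    fragment = Template(
        "[heat_api]\n"
        "bind_host = 0.0.0.0\n"
        "bind_port = {{ api_port }}\n"
        "workers = 1\n"
    )
    print(fragment.render(api_port=8006))
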
-deps = bindep -commands = bindep test - -[testenv:pep8] -deps = pre-commit -commands = pre-commit run -a - -[testenv:venv] -commands = {posargs} -passenv = * - -[testenv:cover] -setenv = - PYTHON=coverage run --source tripleoclient --parallel-mode - HOME={envdir} -commands = - coverage erase - stestr run --color {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report - -[testenv:debug] -deps = - oslotest - {[testenv]deps} -commands = oslo_debug_helper -t tripleoclient/tests {posargs} - -[testenv:docs] -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/doc/requirements.txt -commands = - sphinx-build -a -E -W --keep-going -b html doc/source doc/build/html - -[testenv:pdf-docs] -allowlist_externals = make -description = - Build PDF documentation. -envdir = {toxworkdir}/docs -deps = {[testenv:docs]deps} -commands = - sphinx-build -b latex doc/source doc/build/pdf - make -C doc/build/pdf - -[testenv:genconfig] -setenv = - HOME={env:HOME:/home/stack} -commands = - oslo-config-generator --config-file config-generator/undercloud.conf - oslo-config-generator --config-file config-generator/standalone.conf - -[testenv:releasenotes] -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/doc/requirements.txt -commands = - sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html diff --git a/tripleoclient/__init__.py b/tripleoclient/__init__.py deleted file mode 100644 index 8dc45755e..000000000 --- a/tripleoclient/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - - -__version__ = pbr.version.VersionInfo('python-tripleoclient') diff --git a/tripleoclient/command.py b/tripleoclient/command.py deleted file mode 100644 index 805d4646e..000000000 --- a/tripleoclient/command.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
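The version object exposed in tripleoclient/__init__.py above comes from pbr, which derives the version from the installed distribution's metadata rather than a hard-coded string. A small sketch of how that object is typically consumed (it assumes the package metadata is installed; the printed value is illustrative):

    import pbr.version

    version_info = pbr.version.VersionInfo('python-tripleoclient')
    # version_string() looks up the version from the installed
    # distribution metadata; it fails if the package is not installed.
    print(version_info.version_string())
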
- -from argparse import _StoreAction -import logging - -from osc_lib.command import command -from osc_lib import exceptions as oscexc - -from tripleoclient import exceptions -from tripleoclient import utils - - -class Command(command.Command): - - log = logging.getLogger(__name__ + ".Command") - - def run(self, parsed_args): - utils.store_cli_param(self.cmd_name, parsed_args) - try: - super(Command, self).run(parsed_args) - except (oscexc.CommandError, exceptions.Base): - raise - except Exception: - self.log.exception("Exception occurred while running the command") - raise - - def get_key_pair(self, parsed_args): - """Autodetect or return a user-defined key file. - - :param parsed_args: An argparse object. - :type parsed_args: Object - - :returns: String - """ - - if not parsed_args.overcloud_ssh_key: - key = utils.get_key( - stack=parsed_args.stack, - needs_pair=True - ) - if not key: - raise oscexc.CommandError( - 'No key pair found, set the ssh key using ' - 'the --overcloud-ssh-key switch.' - ) - return key - - return parsed_args.overcloud_ssh_key - - def _configure_logging(self, parsed_args): - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - handler = logging.StreamHandler() - handler.setFormatter(formatter) - self.log.addHandler(handler) - if self.app_args.verbose_level >= 2: - handler.setLevel(logging.DEBUG) - else: - handler.setLevel(logging.INFO) - - -class Lister(Command, command.Lister): - pass - - -class ShowOne(Command, command.ShowOne): - pass - - -class DeprecatedActionStore(_StoreAction): - """Deprecate an option and store the value""" - log = logging.getLogger(__name__) - - def __call__(self, parser, namespace, values, option_string=None): - """Display the warning message""" - if len(self.option_strings) == 1: - message = 'The option {option} is deprecated, it will be removed'\ ' in a future version'.format( - option=self.option_strings[0]) - else: - option = ', '.join(self.option_strings) - message = 'The options {option} are deprecated, they will be removed'\ ' in a future version'.format(option=option) - - self.log.warning(message) - super(DeprecatedActionStore, self).__call__( - parser, namespace, values, option_string) diff --git a/tripleoclient/config/__init__.py b/tripleoclient/config/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/config/base.py b/tripleoclient/config/base.py deleted file mode 100644 index 443359a32..000000000 --- a/tripleoclient/config/base.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
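The DeprecatedActionStore action in command.py above is a drop-in argparse action: the option keeps storing its value, but a deprecation warning is logged whenever it is used. A usage sketch (the parser and the option name are hypothetical; it assumes the class is importable as shown):

    import argparse
    import logging

    from tripleoclient.command import DeprecatedActionStore

    logging.basicConfig(level=logging.WARNING)

    parser = argparse.ArgumentParser()
    # Hypothetical option; any option can opt in to the warning this way.
    parser.add_argument('--plan', action=DeprecatedActionStore)
    args = parser.parse_args(['--plan', 'overcloud'])
    print(args.plan)  # 'overcloud', after a deprecation warning is logged
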
-# - -from oslo_config import cfg - - -class BaseConfig(object): - - def sort_opts(self, opts): - """Sort oslo config options by name - - :param opts: list of oslo cfg opts - :return list - sorted by name - """ - def sort_cfg(cfg): - return cfg.name - return sorted(opts, key=sort_cfg) - - def get_base_opts(self): - _opts = [ - # TODO(aschultz): rename undercloud_output_dir - cfg.StrOpt('output_dir', - help=( - 'Directory to output state, processed heat ' - 'templates, ansible deployment files. ' - 'Defaults to ~/tripleo-deploy/')), - cfg.BoolOpt('cleanup', - default=True, - help=('Cleanup temporary files. Setting this to ' - 'False will leave the temporary files used ' - 'during deployment in place after the command ' - 'is run. This is useful for debugging the ' - 'generated files or if errors occur.'), - ), - ] - return self.sort_opts(_opts) diff --git a/tripleoclient/config/standalone.py b/tripleoclient/config/standalone.py deleted file mode 100644 index 8c23d1488..000000000 --- a/tripleoclient/config/standalone.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import copy - -from osc_lib.i18n import _ -from oslo_config import cfg -from tripleoclient.config.base import BaseConfig -from tripleoclient.constants import DEFAULT_HEAT_CONTAINER - -NETCONFIG_TAGS_EXAMPLE = """ -"network_config": [ - { - "type": "ovs_bridge", - "name": "br-ctlplane", - "ovs_extra": [ - "br-set-external-id br-ctlplane bridge-id br-ctlplane" - ], - "members": [ - { - "type": "interface", - "name": "{{LOCAL_INTERFACE}}", - "primary": "true", - "mtu": {{LOCAL_MTU}}, - "dns_servers": {{UNDERCLOUD_NAMESERVERS}} - } - ], - "addresses": [ - { - "ip_netmask": "{{PUBLIC_INTERFACE_IP}}" - } - ], - "routes": {{SUBNETS_STATIC_ROUTES}}, - "mtu": {{LOCAL_MTU}} -} -] -""" - - -class StandaloneConfig(BaseConfig): - - def get_enable_service_opts(self, cinder=False, frr=False, ironic=False, - ironic_inspector=False, - nova=False, novajoin=False, swift=False, - telemetry=False, validations=False, - neutron=False, heat=False, keystone=True): - _opts = [ - # service enablement - cfg.BoolOpt('enable_cinder', - default=cinder, - deprecated_for_removal=True, - deprecated_reason=_('Cinder can no longer be enabled ' - 'via the config settings.'), - help=_('Whether to install the cinder service.')), - cfg.BoolOpt('enable_frr', - default=frr, - help=_('Whether to enable the frr service.')), - cfg.BoolOpt('enable_ironic', - default=ironic, - help=_('Whether to enable the ironic service.')), - cfg.BoolOpt('enable_ironic_inspector', - default=ironic_inspector, - help=_('Whether to enable the ironic inspector ' - 'service.') - ), - cfg.BoolOpt('enable_nova', - default=nova, - deprecated_for_removal=True, - deprecated_reason=_('Nova can no longer be enabled ' - 'via the config settings.'), - help=_('Whether to enable the nova service.')), - cfg.BoolOpt('enable_novajoin', - default=novajoin, - deprecated_for_removal=True, - deprecated_reason=('Support for the novajoin metadata '
'service has been deprecated.'), - help=_('Whether to install the novajoin metadata ' - 'service') - ), - cfg.BoolOpt('enable_swift', - default=swift, - deprecated_for_removal=True, - deprecated_reason=_('Swift can no longer be enabled ' - 'via the config settings.'), - help=_('Whether to install the swift services') - ), - cfg.BoolOpt('enable_telemetry', - default=telemetry, - deprecated_for_removal=True, - deprecated_reason=_('Telemetry can no longer be ' - 'enabled via the config ' - 'settings.'), - help=_('Whether to install Telemetry services ' - '(ceilometer, gnocchi, aodh).') - ), - cfg.BoolOpt('enable_validations', - default=validations, - help=_( - 'Whether to install requirements to run the ' - 'TripleO validations.') - ), - cfg.BoolOpt('enable_neutron', - default=neutron, - help=_('Whether to enable the neutron service.')), - cfg.BoolOpt('enable_heat', - default=heat, - deprecated_for_removal=True, - deprecated_reason=('Heat has been replaced by the ' - 'heat-ephemeral service and this ' - 'option has been deprecated.'), - help=_('Whether to enable the heat service.')), - cfg.BoolOpt('enable_keystone', - default=keystone, - deprecated_for_removal=True, - deprecated_reason=_('Keystone can no longer be ' - 'enabled via the config ' - 'settings.'), - help=_('Whether to enable the keystone service.')), - - ] - return self.sort_opts(_opts) - - def get_base_opts(self): - _base_opts = super(StandaloneConfig, self).get_base_opts() - _opts = [ - # deployment options - cfg.StrOpt('deployment_user', - help=_( - 'User used to run openstack undercloud install ' - 'command.') - ), - cfg.StrOpt('hieradata_override', - help=_( - 'Path to hieradata override file. Relative paths ' - 'get computed inside of $HOME. When it points to a ' - 'heat env file, it is passed in ' - 'tripleo-heat-templates via "-e ", as is. ' - 'When the file contains legacy instack data, it is ' - 'wrapped with UndercloudExtraConfig and also ' - 'passed in for tripleo-heat-templates as a temp ' - 'file created in output_dir. Note, instack ' - 'hieradata may be incompatible with ' - 'tripleo-heat-templates and will highly likely ' - 'require a manual revision.') - ), - cfg.StrOpt('net_config_override', - help=_( - 'Path to network config override template. ' - 'Relative paths get computed inside of $HOME. ' - 'Must be in the json or yaml format. ' - 'Its content overrides anything in ' - 'NetConfigOverride. The processed ' - 'template is then passed in Heat via the ' - 'generated parameters file created in ' - 'output_dir and used to configure the networking ' - 'via run-os-net-config. If you wish to disable ' - 'you can set this location to an empty file. ' - 'Templated for instack j2 tags ' - 'may be used, ' - 'for example:\n%s ') % NETCONFIG_TAGS_EXAMPLE - ), - cfg.StrOpt('templates', - help=_('The tripleo-heat-templates directory to ' - 'override') - ), - cfg.StrOpt('roles_file', - help=_('Roles file to override for heat. May be an ' - 'absolute path or the path relative to the ' - 'tripleo-heat-templates directory used for ' - 'deployment') - ), - cfg.StrOpt('networks_file', - help=_('Networks file to override for heat. May be an ' - 'absolute path or the path relative to the ' - 'tripleo-heat-templates directory used for ' - 'deployment') - ), - cfg.BoolOpt('heat_native', - default=True, - help=_('Execute the heat-all process natively on this ' - 'host. This option requires that the heat-all ' - 'binaries be installed locally on this machine.' 
- ' This option is enabled by default, which means' - ' heat-all is executed on the host OS ' - 'directly. If this is set to false, a ' - 'containerized version of heat-all is used.')), - cfg.StrOpt('heat_container_image', - help=_('Custom URL for the heat-all container image to ' - 'use as part of the undercloud deployment. If ' - 'not specified, the default "%s" is used. ' - 'If this location requires authentication, ' - 'run podman login prior to running the ' - 'undercloud install.' % DEFAULT_HEAT_CONTAINER) - ), - cfg.StrOpt('container_images_file', - required=False, - help=_( - 'REQUIRED if authentication is needed to fetch ' - 'containers. This file should contain values for ' - '"ContainerImagePrepare" and ' - '"ContainerImageRegistryCredentials" that will be ' - 'used to fetch the containers for the undercloud ' - 'installation. `openstack tripleo container image ' - 'prepare default` can be used to provide a sample ' - '"ContainerImagePrepare" value. Alternatively this ' - 'file can contain all the required Heat parameters ' - 'for the containers for advanced configurations.')), - cfg.ListOpt('custom_env_files', - default=[], - help=_('List of any custom environment yaml files to ' - 'use. These are applied after any other ' - 'configuration and can be used to override ' - 'any derived values. This should be used ' - 'only by advanced users.')), - # container config bits - cfg.StrOpt('container_registry_mirror', - help=_( - 'An optional container registry mirror that will ' - 'be used.') - ), - cfg.ListOpt('container_insecure_registries', - default=[], - help=_('Used to add custom insecure registries for ' - 'containers.') - ), - cfg.StrOpt('container_cli', - default='podman', - choices=('podman',), - help=_('Container CLI used for deployment; ' - 'only podman is allowed.')), - cfg.BoolOpt('container_healthcheck_disabled', - default=False, - help=_( - 'Whether or not we disable the container ' - 'healthchecks.')), - ] - return self.sort_opts(_base_opts + _opts) - - def get_opts(self): - return self.sort_opts(self.get_base_opts() + - self.get_enable_service_opts()) - - -# this is needed for the oslo config generator -def list_opts(): - return [(None, copy.deepcopy(StandaloneConfig().get_opts()))] diff --git a/tripleoclient/config/undercloud.py b/tripleoclient/config/undercloud.py deleted file mode 100644 index 3eafbe293..000000000 --- a/tripleoclient/config/undercloud.py +++ /dev/null @@ -1,462 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
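The option lists built by StandaloneConfig above are ordinary oslo.config opts, so they can be registered on a ConfigOpts instance and read back with their defaults. A minimal sketch (illustrative wiring under that assumption, not the client's actual startup path):

    from oslo_config import cfg

    from tripleoclient.config.standalone import StandaloneConfig

    conf = cfg.ConfigOpts()
    conf.register_opts(StandaloneConfig().get_opts())
    conf(args=[])  # parse no CLI args, so the defaults above apply

    print(conf.container_cli)  # 'podman', per the default shown above
    print(conf.heat_native)    # True, per the default shown above
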
-# - -import copy - -from osc_lib.i18n import _ -from oslo_config import cfg - -from tripleoclient import constants - -from tripleoclient.config.standalone import StandaloneConfig - -CONF = cfg.CONF - -# Control plane network name -SUBNETS_DEFAULT = ['ctlplane-subnet'] - -CIDR_HELP_STR = _( - 'Network CIDR for the Neutron-managed subnet for Overcloud instances.') -DHCP_START_HELP_STR = _( - 'Start of DHCP allocation range for PXE and DHCP of Overcloud instances ' - 'on this network.') -DHCP_END_HELP_STR = _( - 'End of DHCP allocation range for PXE and DHCP of Overcloud instances on ' - 'this network.') -DHCP_EXCLUDE_HELP_STR = _( - 'List of IP addresses or IP ranges to exclude from the subnets allocation ' - 'pool. Example: 192.168.24.50,192.168.24.80-192.168.24.90') -INSPECTION_IPRANGE_HELP_STR = _( - 'Temporary IP range that will be given to nodes on this network during ' - 'the inspection process. Should not overlap with the range defined by ' - 'dhcp_start and dhcp_end, but should be in the same ip subnet.') -GATEWAY_HELP_STR = _( - 'Network gateway for the Neutron-managed network for Overcloud instances ' - 'on this network.') -MASQUERADE_HELP_STR = _( - 'The network will be masqueraded for external access.') -HOST_ROUTES_HELP_STR = _( - 'Host routes for the Neutron-managed subnet for the Overcloud instances ' - 'on this network. The host routes on the local_subnet will also be ' - 'configured on the undercloud.') -DNS_NAMESERVERS_HELP_STR = _( - 'DNS nameservers for the Neutron-managed subnet for the Overcloud ' - 'instances on this network. If no nameservers are defined for the subnet, ' - 'the nameservers defined for undercloud_nameservers will be used.') - -# Deprecated options -_deprecated_opt_network_gateway = [cfg.DeprecatedOpt( - 'network_gateway', group='DEFAULT')] -_deprecated_opt_network_cidr = [cfg.DeprecatedOpt( - 'network_cidr', group='DEFAULT')] -_deprecated_opt_dhcp_start = [cfg.DeprecatedOpt( - 'dhcp_start', group='DEFAULT')] -_deprecated_opt_dhcp_end = [cfg.DeprecatedOpt('dhcp_end', group='DEFAULT')] -_deprecated_opt_inspection_iprange = [cfg.DeprecatedOpt( - 'inspection_iprange', group='DEFAULT')] - - -class UndercloudConfig(StandaloneConfig): - def get_undercloud_service_opts(self): - return super(UndercloudConfig, self).get_enable_service_opts( - keystone=False, - cinder=False, - heat=False, - ironic=True, - ironic_inspector=True, - neutron=True, - nova=False, - novajoin=False, - swift=False, - telemetry=False, - validations=True) - - def get_base_opts(self): - _base_opts = super(UndercloudConfig, self).get_base_opts() - _opts = [ - cfg.StrOpt('undercloud_log_file', - default=constants.UNDERCLOUD_LOG_FILE, - help=_( - 'The path to a log file to store the ' - 'undercloud install/upgrade logs.'), - ), - cfg.StrOpt('undercloud_hostname', - help=_( - 'Fully qualified hostname (including domain) to ' - 'set on the Undercloud. If left unset, the current ' - 'hostname will be used, but the user is ' - 'responsible for configuring all system hostname ' - 'settings appropriately. If set, the undercloud ' - 'install will configure all system hostname ' - 'settings.'), - ), - cfg.StrOpt('local_ip', - default='192.168.24.1/24', - help=_( - 'IP information for the interface on the ' - 'Undercloud that will be handling the PXE boots ' - 'and DHCP for Overcloud instances. 
The IP portion ' - 'of the value will be assigned to the network ' - 'interface defined by local_interface, with the ' - 'netmask defined by the prefix portion of the ' - 'value.') - ), - cfg.StrOpt('undercloud_public_host', - deprecated_name='undercloud_public_vip', - default='192.168.24.2', - help=_( - 'Virtual IP or DNS address to use for the public ' - 'endpoints of Undercloud services. Only used ' - 'with SSL.') - ), - cfg.StrOpt('undercloud_admin_host', - deprecated_name='undercloud_admin_vip', - default='192.168.24.3', - help=_( - 'Virtual IP or DNS address to use for the admin ' - 'endpoints of Undercloud services. Only used ' - 'with SSL.') - ), - cfg.ListOpt('undercloud_nameservers', - default=[], - help=_( - 'DNS nameserver(s). Use for the undercloud ' - 'node and for the overcloud nodes. (NOTE: To use ' - 'different nameserver(s) for the overcloud, ' - 'override the DnsServers parameter in overcloud ' - 'environment.)'), - ), - cfg.ListOpt('undercloud_ntp_servers', - default=['0.pool.ntp.org', '1.pool.ntp.org', - '2.pool.ntp.org', '3.pool.ntp.org'], - help=_('List of ntp servers to use.')), - cfg.StrOpt('undercloud_timezone', - help=_('Host timezone to be used. If no timezone is ' - 'specified, the existing timezone configuration ' - 'is used.')), - cfg.StrOpt('overcloud_domain_name', - default='localdomain', - help=_( - 'DNS domain name to use when deploying the ' - 'overcloud. The overcloud parameter "CloudDomain" ' - 'must be set to a matching value.') - ), - cfg.ListOpt('subnets', - default=SUBNETS_DEFAULT, - help=_( - 'List of routed network subnets for ' - 'provisioning and introspection. Comma ' - 'separated list of names/tags. For each network ' - 'a section/group needs to be added to the ' - 'configuration file with these parameters set: ' - 'cidr, dhcp_start, dhcp_end, inspection_iprange, ' - 'gateway and masquerade_network. Note: The ' - 'section/group must be placed before or after ' - 'any other section. (See the example section ' - '[ctlplane-subnet] in the sample configuration ' - 'file.)')), - cfg.StrOpt('local_subnet', - default=SUBNETS_DEFAULT[0], - help=_( - 'Name of the local subnet, where the PXE boot and ' - 'DHCP interfaces for overcloud instances is ' - 'located. The IP address of the ' - 'local_ip/local_interface should reside ' - 'in this subnet.')), - cfg.StrOpt('undercloud_service_certificate', - help=_( - 'Certificate file to use for OpenStack service SSL ' - 'connections. Setting this enables SSL for the ' - 'OpenStack API endpoints, leaving it unset ' - 'disables SSL.') - ), - cfg.BoolOpt('generate_service_certificate', - default=True, - help=_( - 'When set to True, an SSL certificate will be ' - 'generated as part of the undercloud install and ' - 'this certificate will be used in place of the ' - 'value for undercloud_service_certificate. The ' - 'resulting certificate will be written to ' - '/etc/pki/tls/private/overcloud_endpoint.pem. This' - ' certificate is signed by CA selected by the ' - '"certificate_generation_ca" option.') - ), - cfg.StrOpt('certificate_generation_ca', - default='local', - help=_( - 'The certmonger nickname of the CA from which ' - 'the certificate will be requested. This is used ' - 'only if the generate_service_certificate option ' - 'is set. 
Note that if the "local" CA is selected ' - 'the certmonger\'s local CA certificate will be ' - 'extracted to /etc/pki/ca-trust/source/anchors/' - 'cm-local-ca.pem and subsequently added to the ' - 'trust chain.') - ), - cfg.StrOpt('service_principal', - help=_( - 'The kerberos principal for the service that will ' - 'use the certificate. This is only needed if your ' - 'CA requires a kerberos principal. e.g. with ' - 'FreeIPA.') - ), - cfg.StrOpt('local_interface', - default='eth1', - help=_('Network interface on the Undercloud that will ' - 'be handling the PXE boots and DHCP for ' - 'Overcloud instances.') - ), - cfg.IntOpt('local_mtu', - default=1500, - help=_('MTU to use for the local_interface.') - ), - cfg.StrOpt('inspection_interface', - default='br-ctlplane', - deprecated_name='discovery_interface', - help=_( - 'Network interface on which inspection dnsmasq ' - 'will listen. If in doubt, use the default value.') - ), - cfg.BoolOpt('inspection_extras', - default=True, - help=_( - 'Whether to enable extra hardware collection ' - 'during the inspection process. Requires ' - 'python-hardware or python-hardware-detect ' - 'package on the introspection image.')), - cfg.BoolOpt('inspection_runbench', - default=False, - deprecated_name='discovery_runbench', - help=_( - 'Whether to run benchmarks when inspecting ' - 'nodes. Requires inspection_extras set to True.') - ), - cfg.BoolOpt('enable_node_discovery', - default=False, - help=_( - 'Makes ironic-inspector enroll any unknown node ' - 'that PXE-boots introspection ramdisk in Ironic. ' - 'By default, the "fake" driver is used for new ' - 'nodes (it is automatically enabled when this ' - 'option is set to True). Set ' - 'discovery_default_driver to override. ' - 'Introspection rules can also be used to specify ' - 'driver information for newly enrolled nodes.') - ), - cfg.StrOpt('discovery_default_driver', - default='ipmi', - help=_( - 'The default driver or hardware type to use for ' - 'newly discovered nodes (requires ' - 'enable_node_discovery set to True). It is ' - 'automatically added to enabled_hardware_types.') - ), - cfg.BoolOpt('undercloud_debug', - default=True, - help=_( - 'Whether to enable the debug log level for ' - 'Undercloud OpenStack services and Container ' - 'Image Prepare step.') - ), - cfg.BoolOpt('undercloud_enable_selinux', - default=True, - help=_('Enable or disable SELinux during the ' - 'deployment.')), - cfg.StrOpt('ipa_otp', - help=_( - 'One Time Password to register Undercloud node ' - 'with an IPA server.') - ), - cfg.BoolOpt('ipxe_enabled', - default=True, - help=_('Whether to use iPXE for inspection.'), - deprecated_name='ipxe_deploy', - ), - cfg.IntOpt('scheduler_max_attempts', - default=30, min=1, - deprecated_for_removal=True, - deprecated_reason=_( - 'This option has no effect since nova was removed ' - 'from undercloud.'), - help=_( - 'Maximum number of attempts the scheduler will ' - 'make when deploying the instance. 
You should keep ' - 'it greater or equal to the number of bare metal ' - 'nodes you expect to deploy at once to work around ' - 'potential race condition when scheduling.')), - cfg.BoolOpt('clean_nodes', - default=False, - help=_( - 'Whether to clean overcloud nodes (wipe the hard ' - 'drive) between deployments and after the ' - 'introspection.')), - cfg.ListOpt('enabled_hardware_types', - default=['ipmi', 'redfish', 'ilo', 'idrac'], - help=_('List of enabled bare metal hardware types ' - '(next generation drivers).')), - cfg.BoolOpt('enable_routed_networks', - default=False, - help=_( - 'Enable support for routed ctlplane networks.')), - cfg.BoolOpt('enable_swift_encryption', - default=False, - deprecated_for_removal=True, - deprecated_reason=_( - 'Swift has been disabled in undercloud.'), - help=_( - 'Whether to enable Swift encryption at-rest or ' - 'not.' - )), - cfg.ListOpt('additional_architectures', - default=[], - help=(_( - 'List of additional architectures enabled in ' - 'your cloud environment. The list of supported ' - 'values is: %s') % - ' '.join(constants.ADDITIONAL_ARCHITECTURES)) - ), - cfg.StrOpt('ipv6_address_mode', - default='dhcpv6-stateless', - choices=[ - ('dhcpv6-stateless', 'Address configuration using ' - 'RA and optional information ' - 'using DHCPv6.'), - ('dhcpv6-stateful', 'Address configuration and ' - 'optional information using ' - 'DHCPv6.') - ], - help=(_('IPv6 address configuration mode for the ' - 'undercloud provisioning network.')) - ), - cfg.ListOpt('ironic_enabled_network_interfaces', - default=['flat'], - help=(_('Enabled ironic network interface ' - 'implementations. Each hardware type must ' - 'have at least one valid implementation ' - 'enabled.')) - ), - cfg.StrOpt('ironic_default_network_interface', - default='flat', - choices=[ - ('flat', 'Use one flat provider network.'), - ('neutron', 'Ironic interacts with Neutron to ' - 'enable other network types and ' - 'advanced networking features.') - ], - help=(_('Ironic network interface implementation to ' - 'use by default.')) - ), - cfg.StrOpt('auth_token_lifetime', - default=14400, - help=(_( - 'Authentication token expiration time in ' - 'seconds. 
Note reducing this can have impacts on ' - 'long running undercloud processes.')) - ), - ] - return self.sort_opts(_base_opts + _opts) - - def get_opts(self): - _base_opts = self.get_base_opts() - _service_opts = self.get_undercloud_service_opts() - return self.sort_opts(_base_opts + _service_opts) - - def get_local_subnet_opts(self): - _subnets_opts = [ - cfg.StrOpt('cidr', - default=constants.CTLPLANE_CIDR_DEFAULT, - deprecated_opts=_deprecated_opt_network_cidr, - help=CIDR_HELP_STR), - cfg.ListOpt('dhcp_start', - default=constants.CTLPLANE_DHCP_START_DEFAULT, - deprecated_opts=_deprecated_opt_dhcp_start, - help=DHCP_START_HELP_STR), - cfg.ListOpt('dhcp_end', - default=constants.CTLPLANE_DHCP_END_DEFAULT, - deprecated_opts=_deprecated_opt_dhcp_end, - help=DHCP_END_HELP_STR), - cfg.ListOpt('dhcp_exclude', - default=[], - help=DHCP_EXCLUDE_HELP_STR), - cfg.StrOpt('inspection_iprange', - default=constants.CTLPLANE_INSPECTION_IPRANGE_DEFAULT, - deprecated_opts=_deprecated_opt_inspection_iprange, - help=INSPECTION_IPRANGE_HELP_STR), - cfg.StrOpt('gateway', - default=constants.CTLPLANE_GATEWAY_DEFAULT, - deprecated_opts=_deprecated_opt_network_gateway, - help=GATEWAY_HELP_STR), - cfg.BoolOpt('masquerade', - default=False, - help=MASQUERADE_HELP_STR), - cfg.ListOpt('host_routes', - item_type=cfg.types.Dict(bounds=True), - bounds=True, - default=[], - sample_default=('[{destination: 10.10.10.0/24, ' - 'nexthop: 192.168.24.1}]'), - help=HOST_ROUTES_HELP_STR), - cfg.ListOpt('dns_nameservers', - default=constants.CTLPLANE_DNS_NAMESERVERS_DEFAULT, - help=DNS_NAMESERVERS_HELP_STR), - ] - return self.sort_opts(_subnets_opts) - - def get_remote_subnet_opts(self): - _subnets_opts = [ - cfg.StrOpt('cidr', - help=CIDR_HELP_STR), - cfg.ListOpt('dhcp_start', - default=[], - help=DHCP_START_HELP_STR), - cfg.ListOpt('dhcp_end', - default=[], - help=DHCP_END_HELP_STR), - cfg.ListOpt('dhcp_exclude', - default=[], - help=DHCP_EXCLUDE_HELP_STR), - cfg.StrOpt('inspection_iprange', - help=INSPECTION_IPRANGE_HELP_STR), - cfg.StrOpt('gateway', - help=GATEWAY_HELP_STR), - cfg.BoolOpt('masquerade', - default=False, - help=MASQUERADE_HELP_STR), - cfg.ListOpt('host_routes', - item_type=cfg.types.Dict(bounds=True), - bounds=True, - default=[], - help=HOST_ROUTES_HELP_STR), - cfg.ListOpt('dns_nameservers', - default=constants.CTLPLANE_DNS_NAMESERVERS_DEFAULT, - help=DNS_NAMESERVERS_HELP_STR), - ] - return self.sort_opts(_subnets_opts) - - -def list_opts(): - """List config opts for oslo config generator""" - config = UndercloudConfig() - _opts = config.get_opts() - return [(None, copy.deepcopy(_opts)), - (SUBNETS_DEFAULT[0], - copy.deepcopy(config.get_local_subnet_opts()))] - - -def load_global_config(): - """Register UndercloudConfig options into global config""" - _opts = UndercloudConfig().get_opts() - CONF.register_opts(_opts) diff --git a/tripleoclient/constants.py b/tripleoclient/constants.py deleted file mode 100644 index 62baa638e..000000000 --- a/tripleoclient/constants.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import configparser -import os -import sys - -from osc_lib.i18n import _ -from tripleo_common.image import kolla_builder - -TRIPLEO_ARCHIVE_DIR = "/var/lib/tripleo/archive" -TRIPLEO_HEAT_TEMPLATES = "/usr/share/openstack-tripleo-heat-templates/" -OVERCLOUD_YAML_NAME = "overcloud.yaml" -OVERCLOUD_ROLES_FILE = "roles_data.yaml" -UNDERCLOUD_ROLES_FILE = "roles_data_undercloud.yaml" -STANDALONE_ROLES_FILE = "roles_data_standalone.yaml" -STANDALONE_EPHEMERAL_STACK_VSTATE = '/var/lib/tripleo-heat-installer' -UNDERCLOUD_LOG_FILE = "install-undercloud.log" -OVERCLOUD_NETWORKS_FILE = "network_data_default.yaml" -OVERCLOUD_VIP_FILE = "vip_data_default.yaml" -STANDALONE_NETWORKS_FILE = "/dev/null" -UNDERCLOUD_NETWORKS_FILE = "network_data_undercloud.yaml" -ANSIBLE_HOSTS_FILENAME = "hosts.yaml" -EPHEMERAL_HEAT_POD_NAME = "ephemeral-heat" -ANSIBLE_CWL = "tripleo_dense,tripleo_profile_tasks,tripleo_states" -CONTAINER_IMAGE_PREPARE_LOG_FILE = "container_image_prepare.log" -DEFAULT_CONTAINER_REGISTRY = "quay.io" -DEFAULT_CONTAINER_NAMESPACE = "tripleomastercentos9" -DEFAULT_CONTAINER_NAME_PREFIX = "openstack-" -DEFAULT_CONTAINER_TAG = "current-tripleo" -DEFAULT_RESOURCE_REGISTRY = 'overcloud-resource-registry-puppet.yaml' - -if os.path.isfile(kolla_builder.DEFAULT_PREPARE_FILE): - kolla_builder.init_prepare_defaults(kolla_builder.DEFAULT_PREPARE_FILE) - DEFAULT_CONTAINER_IMAGE_PARAMS = kolla_builder.CONTAINER_IMAGES_DEFAULTS -else: - DEFAULT_CONTAINER_IMAGE_PARAMS = { - 'namespace': '{}/{}'.format( - DEFAULT_CONTAINER_REGISTRY, - DEFAULT_CONTAINER_NAMESPACE - ), - 'name_prefix': DEFAULT_CONTAINER_NAME_PREFIX, - 'tag': DEFAULT_CONTAINER_TAG - } -DEFAULT_HEAT_CONTAINER = ('{}/{}heat-all:{}'.format( - DEFAULT_CONTAINER_IMAGE_PARAMS['namespace'], - DEFAULT_CONTAINER_IMAGE_PARAMS['name_prefix'], - DEFAULT_CONTAINER_IMAGE_PARAMS['tag'])) -DEFAULT_HEAT_API_CONTAINER = ('{}/{}heat-api:{}'.format( - DEFAULT_CONTAINER_IMAGE_PARAMS['namespace'], - DEFAULT_CONTAINER_IMAGE_PARAMS['name_prefix'], - DEFAULT_CONTAINER_IMAGE_PARAMS['tag'])) -DEFAULT_HEAT_ENGINE_CONTAINER = ('{}/{}heat-engine:{}'.format( - DEFAULT_CONTAINER_IMAGE_PARAMS['namespace'], - DEFAULT_CONTAINER_IMAGE_PARAMS['name_prefix'], - DEFAULT_CONTAINER_IMAGE_PARAMS['tag'])) -DEFAULT_EPHEMERAL_HEAT_CONTAINER = \ - 'localhost/tripleo/openstack-heat-all:ephemeral' -DEFAULT_EPHEMERAL_HEAT_API_CONTAINER = \ - 'localhost/tripleo/openstack-heat-api:ephemeral' -DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER = \ - 'localhost/tripleo/openstack-heat-engine:ephemeral' - - -USER_PARAMETERS = 'user-environments/tripleoclient-parameters.yaml' -PASSWORDS_ENV_FORMAT = '{}-passwords.yaml' - -# This directory may contain additional environments to use during deploy -DEFAULT_ENV_DIRECTORY = os.path.join(os.environ.get('HOME', '~/'), - '.tripleo', 'environments') -TRIPLEO_PUPPET_MODULES = "/usr/share/openstack-puppet/modules/" -PUPPET_MODULES = "/etc/puppet/modules/" -PUPPET_BASE = "/etc/puppet/" - -STACK_TIMEOUT = 60 -STACK_OUTPUTS = ['BlacklistedHostnames', - 'RoleNetIpMap', - 'BlacklistedIpAddresses', - 'RoleNetHostnameMap', - 'KeystoneAdminVip', - 'KeystoneRegion', - 'KeystoneURL', - 'EndpointMap', - 'VipMap', - 'EnabledServices', - 'HostsEntry', - 'AdminPassword', - 'GlobalConfig'] - -IRONIC_HTTP_BOOT_BIND_MOUNT = '/var/lib/ironic/httpboot' -IRONIC_LOCAL_IMAGE_PATH = '/var/lib/ironic/images' - -# The default minor update ansible playbooks generated from heat stack output 
-MINOR_UPDATE_PLAYBOOKS = ['update_steps_playbook.yaml'] -# The default major upgrade ansible playbooks generated from heat stack output -MAJOR_UPGRADE_PLAYBOOKS = ["upgrade_steps_playbook.yaml", - "deploy_steps_playbook.yaml", - "post_upgrade_steps_playbook.yaml"] -MAJOR_UPGRADE_SKIP_TAGS = ['validation', 'pre-upgrade'] -EXTERNAL_UPDATE_PLAYBOOKS = ['external_update_steps_playbook.yaml'] -EXTERNAL_UPGRADE_PLAYBOOKS = ['external_upgrade_steps_playbook.yaml'] -# update/upgrade environment files expected by the client in the --templates -# directory (tripleo-heat-templates; defaults to $TRIPLEO_HEAT_TEMPLATES above) -UPDATE_PREPARE_ENV = "environments/lifecycle/update-prepare.yaml" -UPGRADE_PREPARE_ENV = "environments/lifecycle/upgrade-prepare.yaml" -ENABLE_SSH_ADMIN_TIMEOUT = 600 -ENABLE_SSH_ADMIN_STATUS_INTERVAL = 5 -ENABLE_SSH_ADMIN_SSH_PORT_TIMEOUT = 600 - -ADDITIONAL_ARCHITECTURES = ['ppc64le'] - -DEFAULT_VALIDATIONS_BASEDIR = "/usr/share/ansible" - -VALIDATIONS_LOG_BASEDIR = '/var/log/validations' - -DEFAULT_WORK_DIR = os.path.join(os.environ.get('HOME', '~/'), - 'config-download') - -DEFAULT_TEMPLATES_DIR = "/usr/share/python-tripleoclient/templates" - -TRIPLEO_STATIC_INVENTORY = 'tripleo-ansible-inventory.yaml' -ANSIBLE_INVENTORY = os.path.join(DEFAULT_WORK_DIR, - '{}/', TRIPLEO_STATIC_INVENTORY) -ANSIBLE_VALIDATION_DIR = "/usr/share/ansible/validation-playbooks" - -# NOTE(mwhahaha): So if we pip install tripleoclient, we need to also -# honor pulling some other files from a venv (e.g. cli playbooks, -# and container image yaml for building). This logic will create a -# constant for a venv share path which we can use to check to see if things -# like tripleo-common or tripleo-ansible have also been pip installed. -SHARE_BASE_PATH = os.path.join(sys.prefix, 'share') -if sys.prefix != '/usr' and not os.path.isdir(SHARE_BASE_PATH): - SHARE_BASE_PATH = os.path.join('/usr', 'share') - -ANSIBLE_TRIPLEO_PLAYBOOKS = os.path.join( - SHARE_BASE_PATH, 'ansible', 'tripleo-playbooks' -) -if sys.prefix != '/usr' and not os.path.isdir(ANSIBLE_TRIPLEO_PLAYBOOKS): - ANSIBLE_TRIPLEO_PLAYBOOKS = os.path.join( - '/usr', 'share', 'ansible', 'tripleo-playbooks' - ) -CONTAINER_IMAGES_BASE_PATH = os.path.join( - SHARE_BASE_PATH, "tripleo-common", "container-images" -) -if sys.prefix != "/usr" and not os.path.isdir(CONTAINER_IMAGES_BASE_PATH): - CONTAINER_IMAGES_BASE_PATH = os.path.join( - "/usr", "share", "tripleo-common", "container-images" - ) - -VALIDATION_GROUPS_INFO = "{}/groups.yaml".format(DEFAULT_VALIDATIONS_BASEDIR) - -# ctlplane network defaults -CTLPLANE_NET_NAME = 'ctlplane' -CTLPLANE_CIDR_DEFAULT = '192.168.24.0/24' -CTLPLANE_DHCP_START_DEFAULT = ['192.168.24.5'] -CTLPLANE_DHCP_END_DEFAULT = ['192.168.24.24'] -CTLPLANE_INSPECTION_IPRANGE_DEFAULT = '192.168.24.100,192.168.24.120' -CTLPLANE_GATEWAY_DEFAULT = '192.168.24.1' -CTLPLANE_DNS_NAMESERVERS_DEFAULT = [] - -# Ansible parameters used for the actions being executed during tripleo -# deploy/upgrade. Used as kwargs in the `utils.run_ansible_playbook` -# function. A playbook entry is either a string representing the name of -# one playbook or a list of playbooks to execute. The lookup -# will search for the playbook in the work directory path; see the -# consumption sketch just below.
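A minimal sketch of how one of these action entries could be consumed (the actual DEPLOY_ANSIBLE_ACTIONS dictionary follows it). The `utils.run_ansible_playbook` helper is named in the comment above, but its full signature is not part of this diff, so the `workdir` keyword and the call shape here are assumptions for illustration only:

    from tripleoclient import constants, utils

    def run_deploy_action(action, work_dir, **extra):
        # Copy the entry so the module-level constant is never mutated.
        kwargs = dict(constants.DEPLOY_ANSIBLE_ACTIONS[action])
        # 'playbooks' holds a list to run in order; 'playbook' a single name.
        playbooks = kwargs.pop('playbooks', None) or [kwargs.pop('playbook')]
        for playbook in playbooks:
            # Remaining keys (e.g. tags/skip_tags) pass straight through.
            utils.run_ansible_playbook(playbook=playbook, workdir=work_dir,
                                       **kwargs, **extra)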
-DEPLOY_ANSIBLE_ACTIONS = { - 'deploy': { - 'playbook': 'deploy_steps_playbook.yaml' - }, - 'upgrade': { - 'playbook': 'upgrade_steps_playbook.yaml', - 'skip_tags': 'validation' - }, - 'post-upgrade': { - 'playbook': 'post_upgrade_steps_playbook.yaml', - 'skip_tags': 'validation' - }, - 'online-upgrade': { - 'playbook': 'external_upgrade_steps_playbook.yaml', - 'tags': 'online_upgrade' - }, - 'preflight-deploy': { - 'playbooks': ['undercloud-disk-space.yaml', - 'undercloud-disabled-services.yaml'] - }, - 'preflight-upgrade': { - 'playbooks': ['undercloud-disk-space-pre-upgrade.yaml', - 'undercloud-disabled-services.yaml'] - }, -} - -# Key-value pairs of deprecated services and their warning messages -DEPRECATED_SERVICES = {} - -# clouds_yaml related constants -CLOUD_HOME_DIR = os.path.expanduser('~' + os.environ.get('SUDO_USER', '')) -CLOUDS_YAML_DIR = os.path.join('.config', 'openstack') - -# Undercloud config and output -UNDERCLOUD_CONF_PATH = os.path.join(CLOUD_HOME_DIR, "undercloud.conf") -try: - if os.path.exists(UNDERCLOUD_CONF_PATH): - config = configparser.ConfigParser() - config.read(UNDERCLOUD_CONF_PATH) - UNDERCLOUD_OUTPUT_DIR = config.get('DEFAULT', 'output_dir') - else: - raise FileNotFoundError -except (configparser.NoOptionError, FileNotFoundError): - UNDERCLOUD_OUTPUT_DIR = CLOUD_HOME_DIR - -# regex patterns to exclude when looking for unused params -# - exclude *Image params as they may be unused because the service is not -# enabled -# - exclude PythonInterpreter because it's generated by us and only used -# in some custom scripts -UNUSED_PARAMETER_EXCLUDES_RE = ['^(Docker|Container).*Image$', - '^PythonInterpreter$'] - -EXPORT_PASSWORD_EXCLUDE_PATTERNS = [ - 'ceph.*' -] - -EXPORT_DATA = { - "EndpointMap": { - "parameter": "EndpointMapOverride", - }, - "HostsEntry": { - "parameter": "ExtraHostFileEntries", - }, - "GlobalConfig": { - "parameter": "GlobalConfigExtraMapData", - }, - "AllNodesConfig": { - "file": "group_vars/overcloud.json", - "parameter": "AllNodesExtraMapData", - "filter": ["oslo_messaging_notify_short_bootstrap_node_name", - "oslo_messaging_notify_node_names", - "oslo_messaging_rpc_node_names", - "memcached_node_ips", - "ovn_dbs_vip", - "ovn_dbs_node_ips", - "redis_vip"]}, - } - -# Packages that need to be updated to the latest version before an undercloud -# update/upgrade -UNDERCLOUD_EXTRA_PACKAGES = [ - "python3-tripleoclient", - "openstack-tripleo-common", - "openstack-tripleo-heat-templates", - "openstack-tripleo-validations", - "tripleo-ansible" -] - -UPGRADE_PROMPT = _('You are about to run an UPGRADE command. ' - 'It is strongly recommended to perform a backup ' - 'before the upgrade. Are you sure you want to ' - 'upgrade [y/N]?') -UPGRADE_NO = _('User did not confirm upgrade, so exiting. ' - 'Consider using the --yes/-y parameter if you ' - 'prefer to skip this warning in the future') -UPDATE_PROMPT = _('You are about to run an UPDATE command. ' - 'It is strongly recommended to perform a backup ' - 'before the update. Are you sure you want to ' - 'update [y/N]?') -UPDATE_NO = _('User did not confirm update, so exiting. 
' - 'Consider using the --yes/-y parameter if you ' - 'prefer to skip this warning in the future') - -DEFAULT_PARTITION_IMAGE = 'overcloud-full.qcow2' -DEFAULT_WHOLE_DISK_IMAGE = 'overcloud-hardened-uefi-full.qcow2' - -FIPS_COMPLIANT_HASHES = {'sha1', 'sha224', 'sha256', 'sha384', 'sha512'} - -# Work-Dir default file names -WD_DEFAULT_ROLES_FILE_NAME = 'tripleo-{}-roles-data.yaml' -WD_DEFAULT_NETWORKS_FILE_NAME = 'tripleo-{}-network-data.yaml' -WD_DEFAULT_VIP_FILE_NAME = 'tripleo-{}-virtual-ips.yaml' -WD_DEFAULT_BAREMETAL_FILE_NAME = 'tripleo-{}-baremetal-deployment.yaml' -KIND_TEMPLATES = {'roles': WD_DEFAULT_ROLES_FILE_NAME, - 'networks': WD_DEFAULT_NETWORKS_FILE_NAME, - 'baremetal': WD_DEFAULT_BAREMETAL_FILE_NAME, - 'vips': WD_DEFAULT_VIP_FILE_NAME} - -STACK_ENV_FILE_NAME = 'tripleo-{}-environment.yaml' -# Disk usage percentages to check in relation to deploy backups -DEPLOY_BACKUPS_USAGE_PERCENT = 50 -DISK_USAGE_PERCENT = 80 diff --git a/tripleoclient/exceptions.py b/tripleoclient/exceptions.py deleted file mode 100644 index ca50d2df5..000000000 --- a/tripleoclient/exceptions.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Exception definitions""" - - -class Base(Exception): - """Base TripleO exception.""" - - -class WorkflowServiceError(Base): - """The service type is unknown""" - - -class WebSocketConnectionClosed(Base): - """Websocket connection was closed before waiting for messages""" - - -class NotFound(Base): - """Resource not found""" - - -class LookupError(Base): - """Lookup Error""" - - -class DeploymentError(Base): - """Deployment failed""" - - -class PlanEnvWorkflowError(Base): - """Plan Environment workflow has failed""" - - -class ConfigDownloadInProgress(Base): - """Unable to deploy as config download already in progress""" - - msg_format = ("Config download already in progress with " - "execution id {} for stack {}") - - def __init__(self, execution_id='', stack=''): - message = self.msg_format.format(execution_id, stack) - super(ConfigDownloadInProgress, self).__init__(message) - - -class RootUserExecution(Base): - """Command was executed by a root user""" - - -class RootDeviceDetectionError(Base): - """Failed to detect the root device""" - - -class InvalidConfiguration(Base, ValueError): - """Invalid parameters were specified for the deployment""" - - -class IntrospectionError(Base): - """Introspection failed""" - - -class RegisterOrUpdateError(WorkflowServiceError): - """Node registration or update failed""" - - -class NodeProvideError(WorkflowServiceError): - """Node Provide failed.""" - - -class NodeConfigurationError(WorkflowServiceError): - """Node Configuration failed.""" - - -class ProfileMatchingError(Base): - """Failed to validate or assign node profiles""" - - -class PlanCreationError(Base): - """Plan creation failed""" - - -class PlanExportError(Base): - """Plan export failed""" - - -class WorkflowActionError(Base): - """Workflow action failed""" - msg_format = "Action {} execution failed: {}" - - def
__init__(self, action='', output=''): - message = self.msg_format.format(action, output) - super(WorkflowActionError, self).__init__(message) - - -class DownloadError(Base): - """Download attempt failed""" - - -class LogFetchError(Base): - """Fetching logs failed""" - - -class ContainerDeleteFailed(Base): - """Container deletion failed""" - - -class UndercloudUpgradeNotConfirmed(Base): - """Undercloud upgrade security question not confirmed.""" - - -class OvercloudUpdateNotConfirmed(Base): - """Overcloud Update security question not confirmed.""" - - -class OvercloudUpgradeNotConfirmed(Base): - """Overcloud Upgrade security question not confirmed.""" - - -class CellExportError(Base): - """Cell export failed""" - - -class BannedParameters(Base): - """Some of the environment parameters provided should be removed""" - - -class HeatPodMessageQueueException(Base): - """Heat messaging queue not created""" - - -class InvalidPlaybook(Base): - """Invalid playbook path specified""" - - -class NoNodeFound(Base): - """No nodes matching specifications found""" - def __init__(self): - message = "No nodes matching specifications could be found. " - super(NoNodeFound, self).__init__(message) diff --git a/tripleoclient/export.py b/tripleoclient/export.py deleted file mode 100644 index af7f57612..000000000 --- a/tripleoclient/export.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -import logging -import os -import re -import yaml - -from osc_lib.i18n import _ - -from tripleo_common.utils import plan as plan_utils -from tripleoclient import constants -from tripleoclient import utils as oooutils - - -LOG = logging.getLogger(__name__ + ".utils") - - -def export_passwords(working_dir, stack, excludes=True): - """For each password, check if it's excluded, then check if there's a - user-defined value from parameter_defaults, and if not use the value from - the generated passwords. - :param working_dir: Working dir for the deployment - :type working_dir: string - :param stack: stack name for password generator - :type stack: string - :param excludes: filter the passwords or not, defaults to `True` - :type excludes: bool - :returns: filtered password dictionary - :rtype: dict - """ - - def exclude_password(password): - for pattern in constants.EXPORT_PASSWORD_EXCLUDE_PATTERNS: - if re.match(pattern, password, re.I): - return True - - passwords_file = os.path.join( - working_dir, - constants.PASSWORDS_ENV_FORMAT.format(stack)) - with open(passwords_file) as f: - passwords_env = yaml.safe_load(f.read()) - generated_passwords = plan_utils.generate_passwords( - passwords_env=passwords_env) - - filtered_passwords = generated_passwords.copy() - - if excludes: - for password in generated_passwords: - if exclude_password(password): - filtered_passwords.pop(password, None) - - return filtered_passwords - - -def export_stack(working_dir, stack, should_filter=False, - config_download_dir=constants.DEFAULT_WORK_DIR): - """Export stack information.
- Iterates over parameters selected for export and loads - additional data from the referenced files. - - :param working_dir: Working dir for the deployment - :type working_dir: string - :param stack: stack name for password generator - :type stack: string - :param should_filter: - should the export only include values with keys - defined in the 'filter' list. Defaults to `False` - :type should_filter: bool - :param config_download_dir: - path to download directory, - defaults to `constants.DEFAULT_WORK_DIR` - :type config_download_dir: string - - :returns: data to export - :rtype: dict - - The function determines what data to export using information obtained - from the preset `tripleoclient.constants.EXPORT_DATA` dictionary. - parameter: Parameter to be exported - file: If file is specified it is taken as source instead of heat - output. File is relative to <config-download-dir>/<stack>. - filter: in case only specific settings should be - exported from parameter data. - """ - - data = {} - - for export_key, export_param in constants.EXPORT_DATA.items(): - param = export_param["parameter"] - - if "file" in export_param: - # get file data - file = os.path.join(config_download_dir, - stack, - export_param["file"]) - export_data = oooutils.get_parameter_file(file) - else: - # get stack data - export_data = oooutils.get_stack_saved_output_item( - export_key, working_dir) - - if export_data: - # When we export information from a cell controller stack - # we don't want to filter. - if "filter" in export_param and should_filter: - for filter_key in export_param["filter"]: - if filter_key in export_data: - element = {filter_key: export_data[filter_key]} - data.setdefault(param, {}).update(element) - else: - data[param] = export_data - - else: - LOG.warning("No data found to export for %s." % param) - - # Check if AuthCloudName is in the stack environment, and if so add it to - # the export data. Otherwise set it to the exported stack's name. - auth_cloud_name = oooutils.get_stack_saved_output_item( - 'AuthCloudName', working_dir) - if auth_cloud_name: - data['AuthCloudName'] = auth_cloud_name - else: - data['AuthCloudName'] = stack
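    # NOTE: at this point `data` maps heat parameter names to exported values,
    # for example (illustrative shape only, not output from a real deployment):
    #   {'EndpointMapOverride': {...},
    #    'ExtraHostFileEntries': {...},
    #    'GlobalConfigExtraMapData': {...},
    #    'AllNodesExtraMapData': {'memcached_node_ips': [...], ...},
    #    'AuthCloudName': 'overcloud'}
    # When should_filter is True, 'AllNodesExtraMapData' is reduced to the
    # keys listed under EXPORT_DATA['AllNodesConfig']['filter'].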
- - return data - - -def export_ceph_net_key(stack, config_download_dir=constants.DEFAULT_WORK_DIR): - file = os.path.join(config_download_dir, stack, "global_vars.yaml") - with open(file, 'r') as ff: - try: - global_data = yaml.safe_load(ff) - except yaml.MarkedYAMLError as e: - LOG.error( - _('Could not read file %s') % file) - LOG.error(e) - return str(global_data['service_net_map']['ceph_mon_network']) + '_ip' - - -def export_storage_ips(stack, config_download_dir=constants.DEFAULT_WORK_DIR, - ceph_net_key=''): - if len(ceph_net_key) == 0: - ceph_net_key = export_ceph_net_key(stack, config_download_dir) - inventory_file = "ceph-ansible/inventory.yml" - file = os.path.join(config_download_dir, stack, inventory_file) - with open(file, 'r') as ff: - try: - inventory_data = yaml.safe_load(ff) - except yaml.MarkedYAMLError as e: - LOG.error( - _('Could not read file %s') % file) - LOG.error(e) - mon_ips = [] - for mon_role in inventory_data['mons']['children'].keys(): - for hostname in inventory_data[mon_role]['hosts']: - ip = inventory_data[mon_role]['hosts'][hostname][ceph_net_key] - mon_ips.append(ip) - - return mon_ips - - -def export_ceph(stack, cephx, - config_download_dir=constants.DEFAULT_WORK_DIR, - mon_ips=[], config_download_files=[]): - # Return a map of ceph data for a list item in CephExternalMultiConfig - # by parsing files within the config_download_dir of a certain stack - - if len(config_download_files) == 0: - config_download_files = os.listdir(os.path.join( - config_download_dir, stack)) - if 'ceph-ansible' in config_download_files: - if len(mon_ips) == 0: - mon_ips = export_storage_ips(stack, config_download_dir) - external_cluster_mon_ips = str(','.join(mon_ips)) - - # Use ceph-ansible group_vars/all.yml to get remaining values - ceph_ansible_all = "ceph-ansible/group_vars/all.yml" - file = os.path.join(config_download_dir, stack, ceph_ansible_all) - with open(file, 'r') as ff: - try: - ceph_data = yaml.safe_load(ff) - except yaml.MarkedYAMLError as e: - LOG.error( - _('Could not read file %s') % file) - LOG.error(e) - cluster = ceph_data['cluster'] - fsid = ceph_data['fsid'] - - if 'cephadm' in config_download_files: - file = os.path.join(oooutils.get_default_working_dir(stack), - 'ceph_client.yml') - if not os.path.exists(file): - # fall back to old path if user had LP 1978846 during deployment - file = "/home/stack/ceph_client.yaml" - with open(file, 'r') as ff: - try: - ceph_data = yaml.safe_load(ff) - except yaml.MarkedYAMLError as e: - LOG.error( - _('Could not read file %s') % file) - LOG.error(e) - external_cluster_mon_ips = ceph_data['external_cluster_mon_ips'] - cluster = ceph_data['tripleo_ceph_client_cluster'] - fsid = ceph_data['tripleo_ceph_client_fsid'] - - # set cephx_keys - for key in ceph_data['keys']: - if key['name'] == 'client.' + str(cephx): - cephx_keys = [key] - # set ceph_conf_overrides - ceph_conf_overrides = {} - ceph_conf_overrides['client'] = {} - ceph_conf_overrides['client']['keyring'] = '/etc/ceph/' \ - + cluster \ - + '.client.' 
+ cephx \ - + '.keyring' - # Combine extracted data into one map to return - data = {} - data['external_cluster_mon_ips'] = external_cluster_mon_ips - data['keys'] = cephx_keys - data['ceph_conf_overrides'] = ceph_conf_overrides - data['cluster'] = cluster - data['fsid'] = fsid - data['dashboard_enabled'] = False - - return data - - -def export_overcloud(working_dir, stack, excludes, should_filter, - config_download_dir): - data = export_passwords(working_dir, stack, excludes) - data.update(export_stack( - working_dir, stack, should_filter, config_download_dir)) - # do not add extra host entries for VIPs for stacks deployed off that - # exported data, since it already contains those entries - data.update({'AddVipsToEtcHosts': False}) - data = dict(parameter_defaults=data) - return data diff --git a/tripleoclient/heat_launcher.py b/tripleoclient/heat_launcher.py deleted file mode 100644 index 4760c8b79..000000000 --- a/tripleoclient/heat_launcher.py +++ /dev/null @@ -1,796 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import configparser -import datetime -import glob -import grp -import json -import logging -import multiprocessing -import os -import pwd -import shutil -import signal -import subprocess -import tarfile -import tempfile -import time - -import jinja2 -from oslo_utils import timeutils -from tenacity import retry, retry_if_exception_type, retry_if_exception_message -from tenacity.stop import stop_after_attempt, stop_after_delay -from tenacity.wait import wait_fixed - -from tripleoclient.constants import (DEFAULT_HEAT_CONTAINER, - DEFAULT_HEAT_API_CONTAINER, - DEFAULT_HEAT_ENGINE_CONTAINER, - DEFAULT_EPHEMERAL_HEAT_API_CONTAINER, - DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER, - DEFAULT_TEMPLATES_DIR, - EPHEMERAL_HEAT_POD_NAME) -from tripleoclient.exceptions import HeatPodMessageQueueException -from tripleoclient import utils as oooutils - -log = logging.getLogger(__name__) - -NEXT_DAY = (timeutils.utcnow() + datetime.timedelta(days=2)).isoformat() - -FAKE_TOKEN_RESPONSE = { - "token": { - "is_domain": False, - "methods": ["password"], - "roles": [{ - "id": "4c8de39b96794ab28bf37a0b842b8bc8", - "name": "admin" - }], - "expires_at": NEXT_DAY, - "project": { - "domain": { - "id": "default", - "name": "Default" - }, - "id": "admin", - "name": "admin" - }, - "catalog": [{ - "endpoints": [{ - "url": "http://127.0.0.1:%(heat_port)s/v1/admin", - "interface": "public", - "region": "regionOne", - "region_id": "regionOne", - "id": "2809305628004fb391b3d0254fb5b4f7" - }, { - "url": "http://127.0.0.1:%(heat_port)s/v1/admin", - "interface": "internal", - "region": "regionOne", - "region_id": "regionOne", - "id": "2809305628004fb391b3d0254fb5b4f7" - }, { - "url": "http://127.0.0.1:%(heat_port)s/v1/admin", - "interface": "admin", - "region": "regionOne", - "region_id": "regionOne", - "id": "2809305628004fb391b3d0254fb5b4f7" - }], - "type": "orchestration", - "id": "96a549e3961d45cabe883dd17c5835be", - "name": "heat" - }, { - "endpoints": [{ - "url": 
"http://127.0.0.1/v3", - "interface": "public", - "region": "regionOne", - "region_id": "regionOne", - "id": "eca215878e404a2d9dcbcc7f6a027165" - }, { - "url": "http://127.0.0.1/v3", - "interface": "internal", - "region": "regionOne", - "region_id": "regionOne", - "id": "eca215878e404a2d9dcbcc7f6a027165" - }, { - "url": "http://127.0.0.1/v3", - "interface": "admin", - "region": "regionOne", - "region_id": "regionOne", - "id": "eca215878e404a2d9dcbcc7f6a027165" - }], - "type": "identity", - "id": "a785f0b7603042d1bf59237c71af2f15", - "name": "keystone" - }], - "user": { - "domain": { - "id": "default", - "name": "Default" - }, - "id": "8b7b4c094f934e8c83aa7fe12591dc6c", - "name": "admin" - }, - "audit_ids": ["F6ONJ8fCT6i_CFTbmC0vBA"], - "issued_at": datetime.datetime.utcnow().isoformat() - } -} - - -class HeatBaseLauncher(object): - - # The init function will need permission to touch these files - # and chown them accordingly for the heat user - def __init__(self, api_port=8006, - all_container_image=DEFAULT_HEAT_CONTAINER, - api_container_image=DEFAULT_HEAT_API_CONTAINER, - engine_container_image=DEFAULT_HEAT_ENGINE_CONTAINER, - user='heat', - heat_dir='/var/log/heat-launcher', - use_tmp_dir=True, - use_root=False, - rm_heat=False, - skip_heat_pull=False): - self.api_port = api_port - self.all_container_image = all_container_image - self.api_container_image = api_container_image - self.engine_container_image = engine_container_image - self.heat_dir = os.path.abspath(heat_dir) - self.host = "127.0.0.1" - self.timestamp = time.time() - self.db_dump_path = os.path.join(self.heat_dir, 'heat-db.sql') - self.skip_heat_pull = skip_heat_pull - self.zipped_db_suffix = '.tar.bzip2' - self.log_dir = os.path.join(self.heat_dir, 'log') - self.use_tmp_dir = use_tmp_dir - - if not os.path.isdir(self.heat_dir): - # Create the directory if it doesn't exist. - try: - os.makedirs(self.heat_dir, mode=0o755) - except Exception as e: - log.error('Creating temp directory "%s" failed: %s' % - (self.heat_dir, e)) - raise Exception('Could not create temp directory %s: %s' % - (self.heat_dir, e)) - - if self.use_tmp_dir: - self.install_dir = tempfile.mkdtemp( - prefix='%s/tripleo_deploy-' % self.heat_dir) - else: - self.install_dir = self.heat_dir - - if use_root: - self.umount_install_dir() - - if use_root and use_tmp_dir: - # As an optimization we mount the tmp directory in a tmpfs (in - # memory) filesystem. Depending on your system this can cut the - # heat deployment times by half. - p = subprocess.Popen(['mount', '-t', 'tmpfs', '-o', 'size=500M', - 'tmpfs', self.install_dir], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - cmd_stdout, cmd_stderr = p.communicate() - retval = p.returncode - if retval != 0: - # It's ok if this fails, it will still work. It just won't - # be on tmpfs. 
- log.warning('Unable to mount tmpfs for logs and ' - 'database %s: %s' % - (self.heat_dir, cmd_stderr)) - - self.log_file = self._get_log_file_path() - self.sql_db = os.path.join(self.install_dir, 'heat.sqlite') - self.config_file = os.path.join(self.install_dir, 'heat.conf') - self.paste_file = os.path.join(self.install_dir, 'api-paste.ini') - self.token_file = os.path.join(self.install_dir, 'token_file.json') - - self.user = user - self._write_fake_keystone_token(self.api_port, self.token_file) - self._write_heat_config() - self._write_api_paste_config() - if use_root: - uid = int(self.get_heat_uid()) - gid = int(self.get_heat_gid()) - os.chown(self.install_dir, uid, gid) - os.chown(self.config_file, uid, gid) - os.chown(self.paste_file, uid, gid) - - if rm_heat: - self.kill_heat(None) - self.rm_heat() - - def umount_install_dir(self): - # This one may fail but it's just cleanup. - p = subprocess.Popen(['umount', self.install_dir], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - cmd_stdout, cmd_stderr = p.communicate() - retval = p.returncode - if retval != 0: - log.info('Cleanup unmount of %s failed (probably because ' - 'it was not mounted): %s' % - (self.heat_dir, cmd_stderr)) - else: - log.info('umount of %s succeeded' % (self.heat_dir)) - - def _get_log_file_path(self): - return os.path.join(self.install_dir, 'heat.log') - - def _write_heat_config(self): - # TODO(ksambor) It would be nice to make this heat config customizable - heat_config = ''' -[DEFAULT] -log_file = %(log_file)s -transport_url = 'fake://' -rpc_response_timeout = 600 -deferred_auth_method = password -num_engine_workers=1 -convergence_engine = true -max_json_body_size = 8388608 -heat_metadata_server_url=http://127.0.0.1:%(api_port)s/ -default_deployment_signal_transport = HEAT_SIGNAL -max_nested_stack_depth = 10 -keystone_backend = heat.engine.clients.os.keystone.fake_keystoneclient\ -.FakeKeystoneClient - -[noauth] -token_response = %(token_file)s - -[heat_all] -enabled_services = api,engine - -[heat_api] -workers = 1 -bind_host = 127.0.0.1 -bind_port = %(api_port)s - -[database] -connection = sqlite:///%(sqlite_db)s.db - -[paste_deploy] -flavor = noauth -api_paste_config = api-paste.ini - -[yaql] -memory_quota=900000 -limit_iterators=9000 - ''' % {'sqlite_db': self.sql_db, 'log_file': self.log_file, - 'api_port': self.api_port, - 'token_file': self.token_file} - - with open(self.config_file, 'w') as temp_file: - temp_file.write(heat_config) - - def _write_api_paste_config(self): - - heat_api_paste_config = ''' -[pipeline:heat-api-noauth] -pipeline = faultwrap noauth context versionnegotiation apiv1app -[app:apiv1app] -paste.app_factory = heat.common.wsgi:app_factory -heat.app_factory = heat.api.openstack.v1:API -[filter:noauth] -paste.filter_factory = heat.common.noauth:filter_factory -[filter:context] -paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory -[filter:versionnegotiation] -paste.filter_factory = heat.common.wsgi:filter_factory -heat.filter_factory = heat.api.openstack:version_negotiation_filter -[filter:faultwrap] -paste.filter_factory = heat.common.wsgi:filter_factory -heat.filter_factory = heat.api.openstack:faultwrap_filter -''' - with open(self.paste_file, 'w') as temp_file: - temp_file.write(heat_api_paste_config) - - def _write_fake_keystone_token(self, heat_api_port, config_file): - ks_token = json.dumps(FAKE_TOKEN_RESPONSE) % {'heat_port': - heat_api_port} - with open(config_file, 'w') as temp_file: - temp_file.write(ks_token) - - def
get_heat_uid(self): - return pwd.getpwnam(self.user).pw_uid - - def get_heat_gid(self): - return grp.getgrnam(self.user).gr_gid - - def check_database(self): - return True - - def check_message_bus(self): - return True - - def tar_file(self, file_path, cleanup=True): - tf_name = '{}-{}{}'.format(file_path, self.timestamp, - self.zipped_db_suffix) - tf = tarfile.open(tf_name, 'w:bz2') - tf.add(file_path, os.path.basename(file_path)) - tf.close() - log.info("Created tarfile {}".format(tf_name)) - if cleanup: - log.info("Deleting {}".format(file_path)) - os.unlink(file_path) - - def untar_file(self, tar_path, extract_dir): - tf = tarfile.open(tar_path, 'r:bz2') - tf.extractall(extract_dir) - - def rm_heat(self, backup_db=True): - pass - - -class HeatContainerLauncher(HeatBaseLauncher): - - heat_type = 'container' - - def __init__(self, *args, **kwargs): - super(HeatContainerLauncher, self).__init__(*args, **kwargs) - self._fetch_container_image() - self.host = "127.0.0.1" - - def _fetch_container_image(self): - if self.skip_heat_pull: - log.info("Skipping container image pull.") - return - # force pull of latest container image - cmd = ['podman', 'pull', self.all_container_image] - log.debug(' '.join(cmd)) - try: - subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - raise Exception('Unable to fetch container image {}. ' - 'Error: {}'.format(self.all_container_image, e)) - - def launch_heat(self): - # run the heat-all process - cmd = [ - 'podman', 'run', '--rm', - '--name', 'heat_all', - '--user', self.user, - '--net', 'host', - '--volume', '%(conf)s:/etc/heat/heat.conf:ro' % {'conf': - self.config_file}, - '--volume', '%(conf)s:/etc/heat/api-paste.ini:ro' % { - 'conf': self.paste_file}, - '--volume', '%(inst_tmp)s:%(inst_tmp)s:Z' % {'inst_tmp': - self.install_dir}, - self.all_container_image, 'heat-all' - ] - log.debug(' '.join(cmd)) - os.execvp('podman', cmd) - - def heat_db_sync(self): - - cmd = [ - 'podman', 'run', '--rm', - '--net', 'host', - '--user', self.user, - '--volume', '%(conf)s:/etc/heat/heat.conf:Z' % {'conf': - self.config_file}, - '--volume', '%(inst_tmp)s:%(inst_tmp)s:Z' % {'inst_tmp': - self.install_dir}, - self.all_container_image, - 'heat-manage', 'db_sync'] - log.debug(' '.join(cmd)) - subprocess.check_call(cmd) - - def get_heat_uid(self): - cmd = [ - 'podman', 'run', '--rm', - '--net', 'host', - self.all_container_image, - 'getent', 'passwd', self.user - ] - log.debug(' '.join(cmd)) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - universal_newlines=True) - result = p.communicate()[0] - if result: - return result.split(':')[2] - raise Exception('Could not find heat uid') - - def get_heat_gid(self): - cmd = [ - 'podman', 'run', '--rm', - '--net', 'host', - self.all_container_image, - 'getent', 'group', self.user - ] - log.debug(' '.join(cmd)) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - universal_newlines=True) - result = p.communicate()[0] - if result: - return result.split(':')[2] - raise Exception('Could not find heat gid') - - def kill_heat(self, pid): - cmd = ['podman', 'stop', 'heat_all'] - log.debug(' '.join(cmd)) - # We don't want to hear from this command. - subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - def rm_heat(self, backup_db=True): - cmd = ['podman', 'rm', 'heat_all'] - log.debug(' '.join(cmd)) - # We don't want to hear from this command.
- subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - -class HeatNativeLauncher(HeatBaseLauncher): - - heat_type = 'native' - - def __init__(self, *args, **kwargs): - super(HeatNativeLauncher, self).__init__(*args, **kwargs) - self.host = "127.0.0.1" - - def launch_heat(self): - os.execvp('heat-all', ['heat-all', '--config-file', self.config_file]) - - def heat_db_sync(self, restore_db=False): - subprocess.check_call(['heat-manage', '--config-file', - self.config_file, 'db_sync']) - - def kill_heat(self, pid): - os.kill(pid, signal.SIGKILL) - if self.use_tmp_dir: - shutil.copytree( - self.install_dir, - os.path.join(self.heat_dir, - 'tripleo_deploy-%s' % self.timestamp)) - self.umount_install_dir() - self._remove_install_dir() - - @retry(retry=(retry_if_exception_type(OSError) | - retry_if_exception_message('Device or resource busy')), - reraise=True, - stop=(stop_after_delay(10) | stop_after_attempt(10)), - wait=wait_fixed(0.5)) - def _remove_install_dir(self): - shutil.rmtree(self.install_dir) - - -class HeatPodLauncher(HeatContainerLauncher): - - heat_type = 'pod' - - def __init__(self, *args, **kwargs): - super(HeatPodLauncher, self).__init__(*args, **kwargs) - if not os.path.isdir(self.log_dir): - os.makedirs(self.log_dir) - self.host = "127.0.0.1" - self._chcon() - - def _chcon(self): - subprocess.check_call( - ['chcon', '-R', '-t', 'container_file_t', - '-l', 's0', self.heat_dir]) - - def _fetch_container_image(self): - # Skip trying to pull the images if they are set to the default - # as they can't be pulled since they are tagged as localhost. - # If the images are missing for some reason, podman will still pull - # them by default, and error appropriately if needed. - if (self.api_container_image == - DEFAULT_EPHEMERAL_HEAT_API_CONTAINER or - self.engine_container_image == - DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER): - skip_heat_pull = True - else: - skip_heat_pull = self.skip_heat_pull - if skip_heat_pull: - log.info("Skipping container image pull.") - return - # force pull of latest container image - for image in self.api_container_image, self.engine_container_image: - log.info("Pulling container image {}.".format(image)) - cmd = ['sudo', 'podman', 'pull', image] - log.debug(' '.join(cmd)) - try: - subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - raise Exception('Unable to fetch container image {}.' 
- 'Error: {}'.format(image, e)) - - def get_pod_state(self): - inspect = subprocess.run([ - 'sudo', 'podman', 'pod', 'inspect', '--format', - '"{{.State}}"', EPHEMERAL_HEAT_POD_NAME], - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - return self._decode(inspect.stdout) - - def launch_heat(self): - if "Running" in self.get_pod_state(): - log.info("%s pod already running, skipping launch", - EPHEMERAL_HEAT_POD_NAME) - return - self._write_heat_pod() - subprocess.check_call([ - 'sudo', 'podman', 'play', 'kube', - os.path.join(self.heat_dir, 'heat-pod.yaml') - ]) - - def heat_db_sync(self, restore_db=False): - if not self.database_exists(): - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysql', '-e', 'create database heat' - ]) - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysql', '-e', - 'create user if not exists ' - '\'heat\'@\'%\' identified by \'heat\'' - ]) - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysql', 'heat', '-e', - 'grant all privileges on heat.* to \'heat\'@\'%\'' - ]) - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysql', '-e', 'flush privileges;' - ]) - cmd = [ - 'sudo', 'podman', 'run', '--rm', - '--user', 'heat', - '--net', 'host', - '--volume', '%(conf)s:/etc/heat/heat.conf:z' % {'conf': - self.config_file}, - '--volume', '%(inst_tmp)s:%(inst_tmp)s:z' % {'inst_tmp': - self.install_dir}, - self.api_container_image, - 'heat-manage', 'db_sync'] - log.debug(' '.join(cmd)) - subprocess.check_call(cmd) - if restore_db: - self.do_restore_db() - - def do_restore_db(self, db_dump_path=None): - if not db_dump_path: - db_dump_path = self.db_dump_path - # Find the latest dump from self.heat_dir - db_dumps = glob.glob( - '{}-*{}'.format - (db_dump_path, - self.zipped_db_suffix)) - if not db_dumps: - raise Exception('No db backups found to restore in %s' % - self.heat_dir) - db_dump = max(db_dumps, key=os.path.getmtime) - self.untar_file(db_dump, self.heat_dir) - log.info("Restoring db from {}".format(db_dump)) - try: - with open(db_dump_path) as f: - subprocess.run([ - 'sudo', 'podman', 'exec', '-i', '-u', 'root', - 'mysql', 'mysql', 'heat'], stdin=f, - check=True) - finally: - os.unlink(db_dump_path) - - def do_backup_db(self, db_dump_path=None): - if not db_dump_path: - db_dump_path = self.db_dump_path - if os.path.exists(db_dump_path): - raise Exception("Won't overwrite existing db dump at %s. " - "Remove it first." 
% db_dump_path) - log.info("Starting backup of heat db") - with open(db_dump_path, 'w') as out: - subprocess.run([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysqldump', 'heat'], stdout=out, - check=True) - - self.tar_file(db_dump_path) - - def pod_exists(self): - try: - subprocess.check_call( - ['sudo', 'podman', 'pod', 'inspect', EPHEMERAL_HEAT_POD_NAME], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - return True - except subprocess.CalledProcessError: - return False - - def rm_heat(self, backup_db=True): - if self.database_exists(): - if backup_db: - self.do_backup_db() - try: - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysql', 'heat', '-e', - 'drop database heat']) - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', - 'mysql', 'mysql', '-e', - 'drop user \'heat\'@\'%\'']) - except subprocess.CalledProcessError: - pass - if self.pod_exists(): - log.info("Removing pod: %s", EPHEMERAL_HEAT_POD_NAME) - subprocess.call([ - 'sudo', 'podman', 'pod', 'rm', '-f', - EPHEMERAL_HEAT_POD_NAME - ]) - config = self._read_heat_config() - log_file_path = os.path.join(self.log_dir, - config['DEFAULT']['log_file']) - if os.path.exists(log_file_path): - self.tar_file(log_file_path) - - def stop_heat(self): - if self.pod_exists() and self.get_pod_state() != 'Exited': - log.info("Stopping pod: %s", EPHEMERAL_HEAT_POD_NAME) - subprocess.check_call([ - 'sudo', 'podman', 'pod', 'stop', - EPHEMERAL_HEAT_POD_NAME - ]) - log.info("Stopped pod: %s", EPHEMERAL_HEAT_POD_NAME) - - def check_message_bus(self): - log.info("Checking that message bus (rabbitmq) is up") - try: - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', 'rabbitmq', - 'rabbitmqctl', 'list_queues'], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - return True - except subprocess.CalledProcessError as cpe: - log.error("The message bus (rabbitmq) does not seem " - "to be available") - log.error(cpe) - raise - - def check_database(self): - log.info("Checking that database is up") - try: - subprocess.check_call([ - 'sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-h', self._get_ctlplane_ip(), '-e', - 'show databases;'], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - return True - except subprocess.CalledProcessError as cpe: - log.error("The database does not seem to be available") - log.error(cpe) - raise - - def database_exists(self): - output = subprocess.check_output([ - 'sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', 'show databases like "heat"' - ]) - return 'heat' in str(output) - - def kill_heat(self, pid): - if self.pod_exists(): - log.info("Killing pod: %s", EPHEMERAL_HEAT_POD_NAME) - subprocess.call([ - 'sudo', 'podman', 'pod', 'kill', - EPHEMERAL_HEAT_POD_NAME - ]) - log.info("Killed pod: %s", EPHEMERAL_HEAT_POD_NAME) - else: - log.info("Pod does not exist: %s", EPHEMERAL_HEAT_POD_NAME) - - def _decode(self, encoded): - if not encoded: - return "" - decoded = encoded.decode('utf-8') - if decoded.endswith('\n'): - decoded = decoded[:-1] - return decoded - - def _get_transport_url(self): - user = self._decode(subprocess.check_output( - ['sudo', 'hiera', 'rabbitmq::default_user'])) - password = self._decode(subprocess.check_output( - ['sudo', 'hiera', 'rabbitmq::default_pass'])) - fqdn_ctlplane = self._decode(subprocess.check_output( - ['sudo', 'hiera', 'fqdn_ctlplane'])) - port = self._decode(subprocess.check_output( - ['sudo', 'hiera', 'rabbitmq::port'])) - - transport_url = 
"rabbit://%s:%s@%s:%s/?ssl=0" % \ - (user, password, fqdn_ctlplane, port) - return transport_url - - def _get_db_connection(self): - return ('mysql+pymysql://' - 'heat:heat@{}/heat?read_default_file=' - '/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'.format( - oooutils.bracket_ipv6(self._get_ctlplane_vip()))) - - def _get_ctlplane_vip(self): - return self._decode(subprocess.check_output( - ['sudo', 'hiera', 'controller_virtual_ip'])) - - def _get_ctlplane_ip(self): - return self._decode(subprocess.check_output( - ['sudo', 'hiera', 'ctlplane'])) - - def _get_num_engine_workers(self): - return int(multiprocessing.cpu_count() / 2) - - @retry(retry=retry_if_exception_type(HeatPodMessageQueueException), - reraise=True, - stop=(stop_after_delay(10) | stop_after_attempt(10)), - wait=wait_fixed(0.5)) - def wait_for_message_queue(self): - queue_name = 'engine.' + EPHEMERAL_HEAT_POD_NAME - output = subprocess.check_output([ - 'sudo', 'podman', 'exec', 'rabbitmq', - 'rabbitmqctl', 'list_queues']) - if str(output).count(queue_name) < 1: - msg = "Message queue for ephemeral heat not created in time." - raise HeatPodMessageQueueException(msg) - - def _get_log_file_path(self): - return 'heat-{}.log'.format(self.timestamp) - - def _read_heat_config(self): - config = configparser.ConfigParser() - config.read(self.config_file) - return config - - def _write_heat_config(self): - heat_config_tmpl_path = os.path.join(DEFAULT_TEMPLATES_DIR, - EPHEMERAL_HEAT_POD_NAME, - "heat.conf.j2") - with open(heat_config_tmpl_path) as tmpl: - heat_config_tmpl = jinja2.Template(tmpl.read()) - - config_vars = { - "transport_url": self._get_transport_url(), - "db_connection": self._get_db_connection(), - "api_port": self.api_port, - "num_engine_workers": self._get_num_engine_workers(), - "log_file": self.log_file, - } - heat_config = heat_config_tmpl.render(**config_vars) - - with open(self.config_file, 'w') as conf: - conf.write(heat_config) - - def _write_heat_pod(self): - heat_pod_tmpl_path = os.path.join(DEFAULT_TEMPLATES_DIR, - EPHEMERAL_HEAT_POD_NAME, - "heat-pod.yaml.j2") - with open(heat_pod_tmpl_path) as tmpl: - heat_pod_tmpl = jinja2.Template(tmpl.read()) - - pod_vars = { - "install_dir": self.install_dir, - "heat_dir": self.heat_dir, - "api_image": self.api_container_image, - "engine_image": self.engine_container_image, - "heat_pod_name": EPHEMERAL_HEAT_POD_NAME - } - heat_pod = heat_pod_tmpl.render(**pod_vars) - - heat_pod_path = os.path.join(self.heat_dir, "heat-pod.yaml") - with open(heat_pod_path, 'w') as conf: - conf.write(heat_pod) diff --git a/tripleoclient/plugin.py b/tripleoclient/plugin.py deleted file mode 100644 index 34fd4460d..000000000 --- a/tripleoclient/plugin.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""OpenStackClient Plugin interface""" - -import logging - -from osc_lib import utils - -LOG = logging.getLogger(__name__) - -DEFAULT_TRIPLEOCLIENT_API_VERSION = '2' - -# Required by the OSC plugin interface -API_NAME = 'tripleoclient' -API_VERSION_OPTION = 'os_tripleoclient_api_version' -API_VERSIONS = { - '2': 'tripleoclient.plugin' -} - - -def make_client(instance): - return ClientWrapper(instance) - - -# Required by the OSC plugin interface -def build_option_parser(parser): - """Hook to add global options - - Called from openstackclient.shell.OpenStackShell.__init__() - after the builtin parser has been initialized. This is - where a plugin can add global options such as an API version setting. - - :param argparse.ArgumentParser parser: The parser object that has been - initialized by OpenStackShell. - """ - parser.add_argument( - '--os-tripleoclient-api-version', - metavar='', - default=utils.env( - 'OS_TRIPLEOCLIENT_API_VERSION', - default=DEFAULT_TRIPLEOCLIENT_API_VERSION), - help='TripleO Client API version, default=' + - DEFAULT_TRIPLEOCLIENT_API_VERSION + - ' (Env: OS_TRIPLEOCLIENT_API_VERSION)') - return parser - - -class ClientWrapper(object): - - def __init__(self, instance): - self._instance = instance - self._local_orchestration = None - - def local_orchestration(self, api_port): - """Returns an local_orchestration service client""" - - if self._local_orchestration is not None: - return self._local_orchestration - - API_VERSIONS = { - '1': 'heatclient.v1.client.Client', - } - - heat_client = utils.get_client_class( - API_NAME, - '1', - API_VERSIONS) - LOG.debug('Instantiating local_orchestration client: %s', heat_client) - - client = heat_client( - endpoint='http://127.0.0.1:%s/v1/admin' % api_port, - username='admin', - password='fake', - region_name='regionOne', - token='fake', - ) - - self._local_orchestration = client - return self._local_orchestration diff --git a/tripleoclient/tests/__init__.py b/tripleoclient/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/base.py b/tripleoclient/tests/base.py deleted file mode 100644 index d2fc571a6..000000000 --- a/tripleoclient/tests/base.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import fixtures -import testtools - -from tripleoclient.tests import fakes - -_TRUE_VALUES = ('true', '1', 'yes') - - -class TestCase(testtools.TestCase): - - """Test case base class for all unit tests.""" - - def setUp(self): - """Run before each test method to initialize test environment.""" - - super(TestCase, self).setUp() - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. 
- test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - self.useFixture(fixtures.NestedTempfile()) - self.temp_homedir = self.useFixture(fixtures.TempHomeDir()).path - - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - self.log_fixture = self.useFixture(fixtures.FakeLogger()) - - -class TestCommand(TestCase): - """Test command classes""" - - def setUp(self): - super(TestCommand, self).setUp() - # Build up a fake app - self.app = fakes.FakeApp() - self.app.client_manager = fakes.FakeClientManager() - - def check_parser(self, cmd, args, verify_args): - cmd_parser = cmd.get_parser('check_parser') - parsed_args = cmd_parser.parse_args(args) - for av in verify_args: - attr, value = av - if attr: - self.assertIn(attr, parsed_args) - self.assertEqual(getattr(parsed_args, attr), value) - return parsed_args diff --git a/tripleoclient/tests/config/__init__.py b/tripleoclient/tests/config/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/config/test_config_base.py b/tripleoclient/tests/config/test_config_base.py deleted file mode 100644 index c80f6a184..000000000 --- a/tripleoclient/tests/config/test_config_base.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_config import cfg -from tripleoclient.config.base import BaseConfig -from tripleoclient.tests import base - - -class TestBaseConfig(base.TestCase): - def setUp(self): - super(TestBaseConfig, self).setUp() - # Get the class object to test - self.config = BaseConfig() - - def test_sort_opts(self): - _opts = [ - cfg.BoolOpt('b', default=True), - cfg.BoolOpt('a', default=True) - ] - expected = [ - cfg.BoolOpt('a', default=True), - cfg.BoolOpt('b', default=True) - ] - ret = self.config.sort_opts(_opts) - self.assertEqual(expected, ret) - - def test_get_base_opts(self): - ret = self.config.get_base_opts() - expected = ['cleanup', 'output_dir'] - self.assertEqual(expected, [x.name for x in ret]) diff --git a/tripleoclient/tests/config/test_config_standalone.py b/tripleoclient/tests/config/test_config_standalone.py deleted file mode 100644 index 64eb6f20d..000000000 --- a/tripleoclient/tests/config/test_config_standalone.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from tripleoclient.config.standalone import StandaloneConfig -from tripleoclient.tests import base - - -class TestStandaloneConfig(base.TestCase): - def setUp(self): - super(TestStandaloneConfig, self).setUp() - # Get the class object to test - self.config = StandaloneConfig() - - def test_get_base_opts(self): - ret = self.config.get_base_opts() - expected = ['cleanup', - 'container_cli', - 'container_healthcheck_disabled', - 'container_images_file', - 'container_insecure_registries', - 'container_registry_mirror', - 'custom_env_files', - 'deployment_user', - 'heat_container_image', - 'heat_native', - 'hieradata_override', - 'net_config_override', - 'networks_file', - 'output_dir', - 'roles_file', - 'templates'] - self.assertEqual(expected, [x.name for x in ret]) - - def test_get_service_opts(self): - ret = self.config.get_enable_service_opts() - expected = ['enable_cinder', - 'enable_frr', - 'enable_heat', - 'enable_ironic', - 'enable_ironic_inspector', - 'enable_keystone', - 'enable_neutron', - 'enable_nova', - 'enable_novajoin', - 'enable_swift', - 'enable_telemetry', - 'enable_validations'] - self.assertEqual(expected, [x.name for x in ret]) - for x in ret: - if x.name == 'enable_keystone': - self.assertEqual(x.default, True, - "%s config not True" % x.name) - else: - self.assertEqual(x.default, False, - "%s config not False" % x.name) - - def test_get_service_opts_enabled(self): - ret = self.config.get_enable_service_opts(cinder=True, - frr=True, - heat=True, - keystone=True, - ironic=True, - ironic_inspector=True, - neutron=True, - nova=True, - novajoin=True, - swift=True, - telemetry=True, - validations=True) - expected = ['enable_cinder', - 'enable_frr', - 'enable_heat', - 'enable_ironic', - 'enable_ironic_inspector', - 'enable_keystone', - 'enable_neutron', - 'enable_nova', - 'enable_novajoin', - 'enable_swift', - 'enable_telemetry', - 'enable_validations'] - self.assertEqual(expected, [x.name for x in ret]) - for x in ret: - self.assertEqual(x.default, True, "%s config not True" % x.name) - - def test_get_opts(self): - ret = self.config.get_opts() - expected = ['cleanup', - 'container_cli', - 'container_healthcheck_disabled', - 'container_images_file', - 'container_insecure_registries', - 'container_registry_mirror', - 'custom_env_files', - 'deployment_user', - 'enable_cinder', - 'enable_frr', - 'enable_heat', - 'enable_ironic', - 'enable_ironic_inspector', - 'enable_keystone', - 'enable_neutron', - 'enable_nova', - 'enable_novajoin', - 'enable_swift', - 'enable_telemetry', - 'enable_validations', - 'heat_container_image', - 'heat_native', - 'hieradata_override', - 'net_config_override', - 'networks_file', - 'output_dir', - 'roles_file', - 'templates'] - self.assertEqual(expected, [x.name for x in ret]) diff --git a/tripleoclient/tests/config/test_config_undercloud.py b/tripleoclient/tests/config/test_config_undercloud.py deleted file mode 100644 index 30e710511..000000000 --- a/tripleoclient/tests/config/test_config_undercloud.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2018 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from tripleoclient.config.undercloud import UndercloudConfig -from tripleoclient.tests import base - - -class TestUndercloudConfig(base.TestCase): - def setUp(self): - super(TestUndercloudConfig, self).setUp() - # Get the class object to test - self.config = UndercloudConfig() - - def test_get_base_opts(self): - ret = self.config.get_base_opts() - expected = ['additional_architectures', - 'auth_token_lifetime', - 'certificate_generation_ca', - 'clean_nodes', - 'cleanup', - 'container_cli', - 'container_healthcheck_disabled', - 'container_images_file', - 'container_insecure_registries', - 'container_registry_mirror', - 'custom_env_files', - 'deployment_user', - 'discovery_default_driver', - 'enable_node_discovery', - 'enable_routed_networks', - 'enable_swift_encryption', - 'enabled_hardware_types', - 'generate_service_certificate', - 'heat_container_image', - 'heat_native', - 'hieradata_override', - 'inspection_extras', - 'inspection_interface', - 'inspection_runbench', - 'ipa_otp', - 'ipv6_address_mode', - 'ipxe_enabled', - 'ironic_default_network_interface', - 'ironic_enabled_network_interfaces', - 'local_interface', - 'local_ip', - 'local_mtu', - 'local_subnet', - 'net_config_override', - 'networks_file', - 'output_dir', - 'overcloud_domain_name', - 'roles_file', - 'scheduler_max_attempts', - 'service_principal', - 'subnets', - 'templates', - 'undercloud_admin_host', - 'undercloud_debug', - 'undercloud_enable_selinux', - 'undercloud_hostname', - 'undercloud_log_file', - 'undercloud_nameservers', - 'undercloud_ntp_servers', - 'undercloud_public_host', - 'undercloud_service_certificate', - 'undercloud_timezone'] - self.assertEqual(expected, [x.name for x in ret]) - - def test_get_opts(self): - ret = self.config.get_opts() - expected = ['additional_architectures', - 'auth_token_lifetime', - 'certificate_generation_ca', - 'clean_nodes', - 'cleanup', - 'container_cli', - 'container_healthcheck_disabled', - 'container_images_file', - 'container_insecure_registries', - 'container_registry_mirror', - 'custom_env_files', - 'deployment_user', - 'discovery_default_driver', - 'enable_cinder', - 'enable_frr', - 'enable_heat', - 'enable_ironic', - 'enable_ironic_inspector', - 'enable_keystone', - 'enable_neutron', - 'enable_node_discovery', - 'enable_nova', - 'enable_novajoin', - 'enable_routed_networks', - 'enable_swift', - 'enable_swift_encryption', - 'enable_telemetry', - 'enable_validations', - 'enabled_hardware_types', - 'generate_service_certificate', - 'heat_container_image', - 'heat_native', - 'hieradata_override', - 'inspection_extras', - 'inspection_interface', - 'inspection_runbench', - 'ipa_otp', - 'ipv6_address_mode', - 'ipxe_enabled', - 'ironic_default_network_interface', - 'ironic_enabled_network_interfaces', - 'local_interface', - 'local_ip', - 'local_mtu', - 'local_subnet', - 'net_config_override', - 'networks_file', - 'output_dir', - 'overcloud_domain_name', - 'roles_file', - 'scheduler_max_attempts', - 'service_principal', - 
'subnets', - 'templates', - 'undercloud_admin_host', - 'undercloud_debug', - 'undercloud_enable_selinux', - 'undercloud_hostname', - 'undercloud_log_file', - 'undercloud_nameservers', - 'undercloud_ntp_servers', - 'undercloud_public_host', - 'undercloud_service_certificate', - 'undercloud_timezone'] - self.assertEqual(expected, [x.name for x in ret]) - - def test_get_subnet_opts(self): - expected = ['cidr', - 'dhcp_end', - 'dhcp_exclude', - 'dhcp_start', - 'dns_nameservers', - 'gateway', - 'host_routes', - 'inspection_iprange', - 'masquerade'] - - ret = self.config.get_local_subnet_opts() - self.assertEqual(expected, [x.name for x in ret]) - - ret = self.config.get_remote_subnet_opts() - self.assertEqual(expected, [x.name for x in ret]) - - def test_get_undercloud_service_opts(self): - ret = self.config.get_undercloud_service_opts() - expected = {'enable_cinder': False, - 'enable_frr': False, - 'enable_keystone': False, - 'enable_heat': False, - 'enable_ironic': True, - 'enable_ironic_inspector': True, - 'enable_neutron': True, - 'enable_nova': False, - 'enable_novajoin': False, - 'enable_telemetry': False, - 'enable_swift': False, - 'enable_validations': True} - self.assertEqual(sorted(expected.keys()), [x.name for x in ret]) - for x in ret: - self.assertEqual(expected[x.name], x.default, "%s config not %s" % - (x.name, expected[x.name])) diff --git a/tripleoclient/tests/fakes.py b/tripleoclient/tests/fakes.py deleted file mode 100644 index 7cbc96bf9..000000000 --- a/tripleoclient/tests/fakes.py +++ /dev/null @@ -1,437 +0,0 @@ -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
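test_get_subnet_opts and test_get_undercloud_service_opts above use two complementary idioms: asserting that two accessors expose the identical option set, and driving per-option default checks from an expected dict. Both can be captured as small helpers; a sketch assuming only the deleted UndercloudConfig method names, with everything else illustrative:

def assert_same_opt_names(test, config):
    # get_local_subnet_opts() and get_remote_subnet_opts() are meant to
    # stay in lock-step; comparing them to each other catches drift even
    # when the literal expected list in a test goes stale.
    local = sorted(x.name for x in config.get_local_subnet_opts())
    remote = sorted(x.name for x in config.get_remote_subnet_opts())
    test.assertEqual(local, remote)


def assert_defaults(test, opts, expected):
    # expected maps option name -> default value, exactly as in
    # test_get_undercloud_service_opts above.
    test.assertEqual(sorted(expected), [x.name for x in opts])
    for x in opts:
        test.assertEqual(expected[x.name], x.default,
                         "%s config not %s" % (x.name, expected[x.name]))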
-# - -import logging -import sys -from unittest import mock - -from osc_lib.tests import utils - -AUTH_TOKEN = "foobar" -AUTH_URL = "http://0.0.0.0" -WS_URL = "ws://0.0.0.0" -WSS_URL = "wss://0.0.0.0" - -VALIDATIONS_LIST = [{ - 'description': 'My Validation One Description', - 'groups': ['prep', 'pre-deployment'], - 'id': 'my_val1', - 'name': 'My Validation One Name', - 'parameters': {} -}, { - 'description': 'My Validation Two Description', - 'groups': ['prep', 'pre-introspection'], - 'id': 'my_val2', - 'name': 'My Validation Two Name', - 'parameters': {'min_value': 8} -}] - -GROUPS_LIST = [ - ('group1', 'Group1 description'), - ('group2', 'Group2 description'), - ('group3', 'Group3 description'), -] - -VALIDATIONS_LOGS_CONTENTS_LIST = [{ - 'plays': [{ - 'play': { - 'duration': { - 'end': '2019-11-25T13:40:17.538611Z', - 'start': '2019-11-25T13:40:14.404623Z', - 'time_elapsed': '0:00:03.753' - }, - 'host': 'undercloud', - 'id': '008886df-d297-1eaa-2a74-000000000008', - 'validation_id': '512e', - 'validation_path': - '/usr/share/ansible/validation-playbooks' - }, - 'tasks': [ - { - 'hosts': { - 'undercloud': { - '_ansible_no_log': False, - 'action': 'command', - 'changed': False, - 'cmd': [u'ls', '/sys/class/block/'], - 'delta': '0:00:00.018913', - 'end': '2019-11-25 13:40:17.120368', - 'invocation': { - 'module_args': { - '_raw_params': 'ls /sys/class/block/', - '_uses_shell': False, - 'argv': None, - 'chdir': None, - 'creates': None, - 'executable': None, - 'removes': None, - 'stdin': None, - 'stdin_add_newline': True, - 'strip_empty_ends': True, - 'warn': True - } - }, - 'rc': 0, - 'start': '2019-11-25 13:40:17.101455', - 'stderr': '', - 'stderr_lines': [], - 'stdout': 'vda', - 'stdout_lines': [u'vda'] - } - }, - 'task': { - 'duration': { - 'end': '2019-11-25T13:40:17.336687Z', - 'start': '2019-11-25T13:40:14.529880Z' - }, - 'id': - '008886df-d297-1eaa-2a74-00000000000d', - 'name': - 'advanced-format-512e-support : List the available drives' - } - }, - { - 'hosts': { - 'undercloud': { - 'action': - 'advanced_format', - 'changed': False, - 'msg': - 'All items completed', - 'results': [{ - '_ansible_item_label': 'vda', - '_ansible_no_log': False, - 'ansible_loop_var': 'item', - 'changed': False, - 'item': 'vda', - 'skip_reason': 'Conditional result was False', - 'skipped': True - }], - 'skipped': True - } - }, - 'task': { - 'duration': { - 'end': '2019-11-25T13:40:17.538611Z', - 'start': '2019-11-25T13:40:17.341704Z' - }, - 'id': '008886df-d297-1eaa-2a74-00000000000e', - 'name': - 'advanced-format-512e-support: Detect the drive' - } - } - ] - }], - 'stats': { - 'undercloud': { - 'changed': 0, - 'failures': 0, - 'ignored': 0, - 'ok': 1, - 'rescued': 0, - 'skipped': 1, - 'unreachable': 0 - } - }, - 'validation_output': [] -}] - -FAKE_SUCCESS_RUN = [{'Duration': '0:00:01.761', - 'Host_Group': 'overcloud', - 'Status': 'PASSED', - 'Status_by_Host': 'subnode-1,PASSED, subnode-2,PASSED', - 'UUID': '123', - 'Unreachable_Hosts': '', - 'Validations': 'foo'}] - - -class FakeOptions(object): - def __init__(self): - self.debug = True - - -class FakeApp(object): - def __init__(self): - _stdout = None - self.LOG = logging.getLogger('FakeApp') - self.client_manager = None - self.stdin = sys.stdin - self.stdout = _stdout or sys.stdout - self.stderr = sys.stderr - self.restapi = None - self.command_options = None - self.options = FakeOptions() - - -class FakeStackObject(object): - stack_name = 'undercloud' - outputs = [] - - @staticmethod - def get(*args, **kwargs): - pass - - -class 
FakeClientManager(object): - def __init__(self): - self.identity = None - self.auth_ref = None - self.tripleoclient = FakeClientWrapper() - - -class FakeHandle(object): - def __enter__(self): - return self - - def __exit__(self, *args): - return - - -class FakeFile(FakeHandle): - def __init__(self, contents): - self.contents = contents - - def read(self): - if not self.contents: - raise ValueError('I/O operation on closed file') - return self.contents - - def close(self): - self.contents = None - - -class FakeClientWrapper(object): - - def __init__(self): - self._instance = mock.Mock() - - -class FakeRunnerConfig(object): - env = dict() # noqa - artifact_dir = '' - - def __init__(self): - self.command = [] - - def prepare(self): - pass - - -class FakeInstanceData(object): - cacert = '/file/system/path' - _region_name = 'region1' - - @staticmethod - def get_endpoint_for_service_type(*args, **kwargs): - return 'http://things' - - class auth_ref(object): - trust_id = 'yy' - project_id = 'ww' - - class auth(object): - auth_url = 'http://url' - _project_name = 'projectname' - _username = 'username' - _user_id = 'zz' - - @staticmethod - def get_token(*args, **kwargs): - return '12345abcde' - - @staticmethod - def get_project_id(*args, **kwargs): - return 'xx' - - class session(object): - class auth(object): - class auth_ref(object): - _data = {'token': {}} - - -class FakePlaybookExecution(utils.TestCommand): - - def setUp(self, ansible_mock=True): - super(FakePlaybookExecution, self).setUp() - - self.app.options = FakeOptions() - self.app.client_manager.auth_ref = mock.Mock(auth_token="TOKEN") - self.baremetal = self.app.client_manager.baremetal = mock.MagicMock() - self.app.client_manager.baremetal_introspection = mock.MagicMock() - self.inspector = self.app.client_manager.baremetal_introspection - self.baremetal.node.list.return_value = [] - compute = self.app.client_manager.compute = mock.Mock() - compute.servers.list.return_value = [] - self.app.client_manager.identity = mock.Mock() - self.app.client_manager.image = mock.Mock() - self.app.client_manager.network = mock.Mock() - self.tripleoclient = mock.Mock() - stack = self.app.client_manager.orchestration = mock.Mock() - stack.stacks.get.return_value = FakeStackObject - - get_key = mock.patch('tripleoclient.utils.get_key') - get_key.start() - get_key.return_value = 'keyfile-path' - self.addCleanup(get_key.stop) - - self.register_or_update = mock.patch( - 'tripleoclient.workflows.baremetal.register_or_update', - autospec=True, - return_value=[mock.Mock(uuid='MOCK_NODE_UUID')] - ) - self.register_or_update.start() - self.addCleanup(self.register_or_update.stop) - - if ansible_mock: - get_stack = mock.patch('tripleoclient.utils.get_stack') - get_stack.start() - stack = get_stack.return_value = mock.Mock() - stack.stack_name = 'testStack' - self.addCleanup(get_stack.stop) - - self.gcn = mock.patch( - 'tripleo_common.utils.config.Config', - autospec=True - ) - self.gcn.start() - self.addCleanup(self.gcn.stop) - - self.mkdirs = mock.patch( - 'os.makedirs', - autospec=True - ) - self.mkdirs.start() - self.addCleanup(self.mkdirs.stop) - - -def fake_ansible_runner_run_return(rc=0): - - return 'Test Status', rc - - -class FakeNeutronNetwork(dict): - def __init__(self, **attrs): - NETWORK_ATTRS = ['id', - 'name', - 'status', - 'tenant_id', - 'is_admin_state_up', - 'mtu', - 'segments', - 'is_shared', - 'subnet_ids', - 'provider:network_type', - 'provider:physical_network', - 'provider:segmentation_id', - 'router:external', - 'availability_zones', - 
'availability_zone_hints', - 'is_default', - 'tags'] - - raw = dict.fromkeys(NETWORK_ATTRS) - raw.update(attrs) - raw.update({ - 'provider_physical_network': attrs.get( - 'provider:physical_network', None), - 'provider_network_type': attrs.get( - 'provider:network_type', None), - 'provider_segmentation_id': attrs.get( - 'provider:segmentation_id', None) - }) - super(FakeNeutronNetwork, self).__init__(raw) - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - if key in self: - self[key] = value - else: - raise AttributeError(key) - - -class FakeNeutronSubnet(dict): - def __init__(self, **attrs): - SUBNET_ATTRS = ['id', - 'name', - 'network_id', - 'cidr', - 'tenant_id', - 'is_dhcp_enabled', - 'dns_nameservers', - 'allocation_pools', - 'host_routes', - 'ip_version', - 'gateway_ip', - 'ipv6_address_mode', - 'ipv6_ra_mode', - 'subnetpool_id', - 'segment_id', - 'tags'] - - raw = dict.fromkeys(SUBNET_ATTRS) - raw.update(attrs) - super(FakeNeutronSubnet, self).__init__(raw) - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - if key in self: - self[key] = value - else: - raise AttributeError(key) - - -class FakeFlavor(object): - def __init__(self, name, profile=''): - self.name = name - self.profile = name - if profile != '': - self.profile = profile - - def get_keys(self): - return { - 'capabilities:boot_option': 'local', - 'capabilities:profile': self.profile - } - - -class FakeMachine: - def __init__(self, id, name=None, driver=None, driver_info=None, - chassis_uuid=None, instance_info=None, instance_uuid=None, - properties=None, reservation=None, last_error=None, - provision_state='available', is_maintenance=False, - power_state='power off'): - self.id = id - self.name = name - self.driver = driver - self.driver_info = driver_info - self.chassis_uuid = chassis_uuid - self.instance_info = instance_info - self.instance_uuid = instance_uuid - self.properties = properties - self.reservation = reservation - self.last_error = last_error - self.provision_state = provision_state - self.is_maintenance = is_maintenance - self.power_state = power_state diff --git a/tripleoclient/tests/fixture_data/__init__.py b/tripleoclient/tests/fixture_data/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/fixture_data/deployment.py b/tripleoclient/tests/fixture_data/deployment.py deleted file mode 100644 index 0f4e9c153..000000000 --- a/tripleoclient/tests/fixture_data/deployment.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import fixtures - - -class DeploymentWorkflowFixture(fixtures.Fixture): - - def _setUp(self): - super(DeploymentWorkflowFixture, self)._setUp() - self.mock_get_hosts_and_enable_ssh_admin = self.useFixture( - fixtures.MockPatch('tripleoclient.workflows.deployment.' 
- 'get_hosts_and_enable_ssh_admin') - ).mock - self.mock_config_download = self.useFixture(fixtures.MockPatch( - 'tripleoclient.workflows.deployment.config_download') - ).mock - self.mock_set_deployment_status = self.useFixture(fixtures.MockPatch( - 'tripleoclient.workflows.deployment.set_deployment_status') - ).mock - self.mock_create_overcloudrc = self.useFixture(fixtures.MockPatch( - 'tripleoclient.workflows.deployment.create_overcloudrc') - ).mock - make_config_download_dir = \ - 'tripleoclient.workflows.deployment.make_config_download_dir' - self.mock_make_config_download_dir = self.useFixture( - fixtures.MockPatch(make_config_download_dir) - ).mock - - -class UtilsOvercloudFixture(fixtures.Fixture): - - def _setUp(self): - super(UtilsOvercloudFixture, self)._setUp() - self.mock_deploy_tht = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.create_tempest_deployer_input') - ).mock - self.mock_utils_endpoint = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.get_overcloud_endpoint') - ).mock - self.mock_update_deployment_status = self.useFixture( - fixtures.MockPatch( - 'tripleoclient.utils.update_deployment_status') - ).mock - self.mock_get_default_working_dir = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.get_default_working_dir') - ).mock - self.mock_get_default_working_dir.return_value = \ - self.useFixture(fixtures.TempDir()).path - - -class UtilsFixture(fixtures.Fixture): - - def _setUp(self): - super(UtilsFixture, self)._setUp() - self.wait_for_stack_ready_mock = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.wait_for_stack_ready', - return_value=True) - ).mock - self.mock_remove_known_hosts = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.remove_known_hosts') - ).mock - self.mock_run_ansible_playbook = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.run_ansible_playbook') - ).mock - self.mock_get_heat_launcher = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.get_heat_launcher') - ).mock - self.mock_launch_heat = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.launch_heat') - ).mock - self.mock_kill_heat = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.kill_heat') - ).mock - self.mock_rm_heat = self.useFixture(fixtures.MockPatch( - 'tripleoclient.utils.rm_heat') - ).mock - self.mock_export_overcloud = self.useFixture(fixtures.MockPatch( - 'tripleoclient.export.export_overcloud') - ).mock - self.mock_export_overcloud.return_value = {} diff --git a/tripleoclient/tests/test_export.py b/tripleoclient/tests/test_export.py deleted file mode 100644 index c4d6a7527..000000000 --- a/tripleoclient/tests/test_export.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
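The fixture classes in fixture_data/deployment.py above bundle related fixtures.MockPatch objects so a test can activate a whole set of workflow mocks with a single useFixture() call and get automatic cleanup. A minimal sketch of that pattern; the patched path 'mypackage.workflows.deploy' is a placeholder, not a real tripleoclient target:

import fixtures


class WorkflowFixture(fixtures.Fixture):
    """Bundle related patches so each test pulls them in with one call."""

    def _setUp(self):
        super(WorkflowFixture, self)._setUp()
        # Each MockPatch is stopped automatically when the fixture is
        # cleaned up; no explicit addCleanup() is needed in the test.
        self.mock_deploy = self.useFixture(
            fixtures.MockPatch('mypackage.workflows.deploy')).mock


# Usage inside a fixtures-aware TestCase:
#     fix = self.useFixture(WorkflowFixture())
#     <exercise code under test>
#     fix.mock_deploy.assert_called_once_with(...)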
-# -import os - -from unittest import mock -from unittest import TestCase - -from tripleoclient import export -from tripleoclient import utils - - -class TestExport(TestCase): - def setUp(self): - self.unlink_patch = mock.patch('os.unlink') - self.addCleanup(self.unlink_patch.stop) - self.unlink_patch.start() - self.mock_log = mock.Mock('logging.getLogger') - - self.mock_open = mock.mock_open(read_data='{"an_key":"an_value"}') - - ceph_inv = { - 'DistributedComputeHCI': { - 'hosts': { - 'dcn0-distributedcomputehci-0': { - 'foo_ip': '192.168.24.42' - }, - 'dcn0-distributedcomputehci-1': { - 'foo_ip': '192.168.8.8' - } - } - }, - 'mons': { - 'children': { - 'DistributedComputeHCI': {} - } - } - } - self.mock_open_ceph_inv = mock.mock_open(read_data=str(ceph_inv)) - - ceph_global = { - 'service_net_map': { - 'ceph_mon_network': 'storage' - } - } - self.mock_open_ceph_global = mock.mock_open(read_data=str(ceph_global)) - - ceph_all = { - 'cluster': 'dcn0', - 'fsid': 'a5a22d37-e01f-4fa0-a440-c72585c7487f', - 'keys': [ - {'name': 'client.openstack'} - ] - } - self.mock_open_ceph_all = mock.mock_open(read_data=str(ceph_all)) - - def _get_stack_saved_output_item(self, output_key, working_dir): - outputs = { - 'EndpointMap': dict(em_key='em_value'), - 'HostsEntry': 'hosts entry', - 'GlobalConfig': dict(gc_key='gc_value'), - 'AuthCloudName': 'central', - } - return outputs[output_key] - - @mock.patch('tripleoclient.utils.get_stack_saved_output_item') - @mock.patch('tripleoclient.utils.os.path.exists', - autospec=True, return_value=True) - def test_export_stack(self, mock_exists, mock_output_item): - mock_output_item.side_effect = self._get_stack_saved_output_item - working_dir = utils.get_default_working_dir('overcloud') - with mock.patch('tripleoclient.utils.open', self.mock_open): - data = export.export_stack(working_dir, "overcloud") - - expected = \ - {'AllNodesExtraMapData': {u'an_key': u'an_value'}, - 'AuthCloudName': 'central', - 'EndpointMapOverride': {'em_key': 'em_value'}, - 'ExtraHostFileEntries': 'hosts entry', - 'GlobalConfigExtraMapData': {'gc_key': 'gc_value'}} - - self.assertEqual(expected, data) - self.mock_open.assert_called_once_with( - os.path.join( - os.environ.get('HOME'), - 'config-download/overcloud/group_vars/overcloud.json'), - 'r') - - @mock.patch('tripleoclient.utils.get_stack_saved_output_item') - @mock.patch('tripleoclient.utils.os.path.exists', - autospec=True, return_value=True) - def test_export_stack_auth_cloud_name_set( - self, mock_exists, mock_output_item): - mock_output_item.side_effect = self._get_stack_saved_output_item - working_dir = utils.get_default_working_dir('overcloud') - with mock.patch('tripleoclient.utils.open', self.mock_open): - data = export.export_stack(working_dir, "overcloud") - - expected = \ - {'AllNodesExtraMapData': {u'an_key': u'an_value'}, - 'AuthCloudName': 'central', - 'EndpointMapOverride': {'em_key': 'em_value'}, - 'ExtraHostFileEntries': 'hosts entry', - 'GlobalConfigExtraMapData': {'gc_key': 'gc_value'}} - - self.assertEqual(expected, data) - self.mock_open.assert_called_once_with( - os.path.join( - os.environ.get('HOME'), - 'config-download/overcloud/group_vars/overcloud.json'), - 'r') - - @mock.patch('tripleoclient.utils.get_stack_saved_output_item') - @mock.patch('tripleoclient.utils.os.path.exists', - autospec=True, return_value=True) - def test_export_stack_should_filter(self, mock_exists, mock_stack_output): - working_dir = utils.get_default_working_dir('overcloud') - mock_stack_output.side_effect = 
self._get_stack_saved_output_item - self.mock_open = mock.mock_open( - read_data=('{"an_key":"an_value","ovn_dbs_vip":"vip",' - '"ovn_dbs_node_ips":[1,2]}')) - with mock.patch('builtins.open', self.mock_open): - data = export.export_stack( - working_dir, "overcloud", should_filter=True) - - expected = \ - {'AllNodesExtraMapData': {u'ovn_dbs_vip': u'vip', - u'ovn_dbs_node_ips': [1, 2]}, - 'AuthCloudName': 'central', - 'EndpointMapOverride': {'em_key': 'em_value'}, - 'ExtraHostFileEntries': 'hosts entry', - 'GlobalConfigExtraMapData': {'gc_key': 'gc_value'}} - - self.assertEqual(expected, data) - self.mock_open.assert_called_once_with( - os.path.join( - os.environ.get('HOME'), - 'config-download/overcloud/group_vars/overcloud.json'), - 'r') - - @mock.patch('tripleoclient.utils.os.path.exists', - autospec=True, return_value=True) - def test_export_stack_cd_dir(self, mock_exists): - working_dir = utils.get_default_working_dir('overcloud') - with mock.patch('tripleoclient.utils.open', self.mock_open): - export.export_stack(working_dir, "overcloud", - config_download_dir='/foo') - self.mock_open.assert_called_with( - '/foo/overcloud/group_vars/overcloud.json', 'r') - - @mock.patch('tripleoclient.export.LOG') - @mock.patch('tripleo_common.utils.plan.generate_passwords') - def test_export_passwords(self, mock_gen_pass, mock_log): - mock_passwords = { - 'AdminPassword': 'A', - 'RpcPassword': 'B', - 'CephClientKey': 'cephkey', - 'CephClusterFSID': 'cephkey', - 'CephRgwKey': 'cephkey'} - - mock_gen_pass.return_value = mock_passwords - - expected_password_export = mock_passwords.copy() - working_dir = utils.get_default_working_dir('overcloud') - with mock.patch('builtins.open', mock.mock_open()): - data = export.export_passwords(working_dir, 'overcloud', False) - - self.assertEqual( - expected_password_export, - data) - - @mock.patch('tripleoclient.utils.get_stack_saved_output_item') - @mock.patch('tripleoclient.export.LOG') - @mock.patch('tripleo_common.utils.plan.generate_passwords') - def test_export_passwords_excludes(self, mock_gen_pass, mock_log, - mock_output_item): - mock_output_item.side_effect = self._get_stack_saved_output_item - mock_passwords = { - 'AdminPassword': 'A', - 'RpcPassword': 'B', - 'CephClientKey': 'cephkey', - 'CephClusterFSID': 'cephkey', - 'CephRgwKey': 'cephkey'} - - mock_gen_pass.return_value = mock_passwords - - expected_password_export = { - 'AdminPassword': 'A', - 'RpcPassword': 'B'} - - working_dir = utils.get_default_working_dir('overcloud') - with mock.patch('builtins.open', mock.mock_open()): - data = export.export_passwords(working_dir, 'overcloud') - - self.assertEqual(expected_password_export, data) - - def test_export_ceph_net_key(self): - with mock.patch('builtins.open', self.mock_open_ceph_global): - mon_key = export.export_ceph_net_key('dcn0', - config_download_dir='/foo') - self.assertEqual(mon_key, 'storage_ip') - self.mock_open_ceph_global.assert_called_once_with( - '/foo/dcn0/global_vars.yaml', 'r') - - def test_export_storage_ips(self): - with mock.patch('builtins.open', self.mock_open_ceph_inv): - storage_ips = export.export_storage_ips('dcn0', - config_download_dir='/foo', - ceph_net_key='foo_ip') - self.assertEqual(storage_ips, ['192.168.24.42', '192.168.8.8']) - self.mock_open_ceph_inv.assert_called_once_with( - '/foo/dcn0/ceph-ansible/inventory.yml', 'r') - - def test_export_ceph(self): - expected = { - 'external_cluster_mon_ips': '192.168.24.42', - 'keys': [ - {'name': 'client.openstack'} - ], - 'ceph_conf_overrides': { - 'client': { - 'keyring': 
'/etc/ceph/dcn0.client.openstack.keyring' - } - }, - 'cluster': 'dcn0', - 'fsid': 'a5a22d37-e01f-4fa0-a440-c72585c7487f', - 'dashboard_enabled': False - } - with mock.patch('builtins.open', self.mock_open_ceph_all): - data = export.export_ceph('dcn0', 'openstack', - config_download_dir='/foo', - mon_ips=['192.168.24.42'], - config_download_files=['ceph-ansible']) - self.assertEqual(data, expected) - self.mock_open_ceph_all.assert_called_once_with( - '/foo/dcn0/ceph-ansible/group_vars/all.yml', 'r') diff --git a/tripleoclient/tests/test_heat_launcher.py b/tripleoclient/tests/test_heat_launcher.py deleted file mode 100644 index fc8c4e760..000000000 --- a/tripleoclient/tests/test_heat_launcher.py +++ /dev/null @@ -1,686 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import os -from pathlib import Path -import shutil -import subprocess -import time -from unittest import mock - -from tripleoclient import constants -from tripleoclient import heat_launcher -from tripleoclient.exceptions import HeatPodMessageQueueException -from tripleoclient.tests import base -from tripleoclient import utils - - -class TestHeatPodLauncher(base.TestCase): - def setUp(self): - super(TestHeatPodLauncher, self).setUp() - self.run = mock.patch('subprocess.run').start() - self.call = mock.patch('subprocess.call').start() - self.check_call = mock.patch('subprocess.check_call').start() - self.check_output = mock.patch('subprocess.check_output').start() - self.templates_dir = mock.patch( - 'tripleoclient.heat_launcher.DEFAULT_TEMPLATES_DIR', - os.path.join(os.path.dirname(__file__), - '..', '..', 'templates')).start() - self.heat_dir = self.useFixture(fixtures.TempDir()).path - self.bracket_ipv6 = mock.patch( - 'tripleoclient.utils.bracket_ipv6').start() - self.bracket_ipv6.return_value = '1.1.1.1' - - self.addCleanup(mock.patch.stopall) - - def get_launcher(self, **kwargs): - return heat_launcher.HeatPodLauncher( - heat_dir=self.heat_dir, - use_tmp_dir=False, - **kwargs) - - def test_rm_heat_launcher(self): - self.assertIsInstance(self.get_launcher(rm_heat=True), - heat_launcher.HeatPodLauncher) - - def test_chcon(self): - launcher = self.get_launcher() - launcher._chcon() - calls = [ - mock.call(['chcon', '-R', '-t', 'container_file_t', '-l', 's0', - launcher.heat_dir]), - mock.call(['chcon', '-R', '-t', 'container_file_t', '-l', 's0', - launcher.heat_dir]) - ] - self.assertEqual(self.check_call.mock_calls, calls) - - def test_fetch_container_image(self): - launcher = self.get_launcher(skip_heat_pull=True) - self.check_output.reset_mock() - launcher._fetch_container_image() - self.check_output.assert_not_called() - - # With skip_heat_pull=False, this should try and run the command to - # pull the default images from quay.io - launcher = self.get_launcher(skip_heat_pull=False) - launcher._fetch_container_image() - self.check_output.assert_called_with(['sudo', 'podman', 'pull', - mock.ANY]) - - # With skip_heat_pull=False, but using the default ephemeral heat - # 
container images, this should still skip the command to run the pull - launcher = self.get_launcher(skip_heat_pull=False) - launcher.api_container_image = \ - constants.DEFAULT_EPHEMERAL_HEAT_API_CONTAINER - launcher.engine_container_image = \ - constants.DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER - self.check_output.reset_mock() - launcher._fetch_container_image() - self.check_output.assert_not_called() - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher._decode') - def test_get_pod_state(self, mock_decode): - launcher = self.get_launcher() - launcher.get_pod_state() - self.run.assert_called_once_with( - ['sudo', 'podman', 'pod', 'inspect', '--format', '"{{.State}}"', - 'ephemeral-heat'], check=False, stderr=-2, stdout=-1) - - @mock.patch( - 'tripleoclient.heat_launcher.HeatPodLauncher._write_heat_config') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher._write_heat_pod') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.get_pod_state') - def test_launch_heat( - self, mock_get_pod_state, mock_write_heat_pod, - mock_write_heat_config): - - launcher = self.get_launcher() - self.check_call.reset_mock() - - mock_get_pod_state.return_value = 'Running' - launcher.launch_heat() - self.check_call.assert_not_called() - - mock_get_pod_state.return_value = 'Exited' - launcher.launch_heat() - self.check_call.assert_called_once_with(['sudo', 'podman', 'play', - 'kube', mock.ANY]) - self.check_call.reset_mock() - - mock_get_pod_state.return_value = '' - launcher.launch_heat() - self.check_call.assert_called_once_with(['sudo', 'podman', 'play', - 'kube', mock.ANY]) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.do_restore_db') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.database_exists') - def test_heat_db_sync( - self, mock_db_exists, mock_do_restore_db): - - launcher = self.get_launcher() - mock_db_exists.return_value = True - launcher.heat_db_sync(restore_db=False) - calls = [ - mock.call(['chcon', '-R', '-t', 'container_file_t', '-l', 's0', - mock.ANY]), - mock.call(['sudo', 'podman', 'run', '--rm', '--user', 'heat', - '--net', 'host', '--volume', mock.ANY, '--volume', - mock.ANY, mock.ANY, 'heat-manage', 'db_sync']) - ] - self.assertEqual(self.check_call.mock_calls, calls) - self.assertFalse(mock_do_restore_db.called) - - self.check_call.reset_mock() - - mock_db_exists.return_value = True - launcher.heat_db_sync(restore_db=True) - self.check_call.assert_called_once_with([ - 'sudo', 'podman', 'run', '--rm', '--user', 'heat', '--net', 'host', - '--volume', mock.ANY, '--volume', mock.ANY, mock.ANY, - 'heat-manage', 'db_sync' - ]) - self.assertTrue(mock_do_restore_db.called) - - self.check_call.reset_mock() - mock_db_exists.return_value = False - launcher.heat_db_sync(restore_db=True) - calls = [ - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', 'create database heat']), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', "create user if not exists 'heat'@'%' " - "identified by 'heat'"]), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', 'heat', '-e', "grant all privileges on heat.* " - "to 'heat'@'%'"]), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', 'flush privileges;']), - mock.call(['sudo', 'podman', 'run', '--rm', '--user', 'heat', - '--net', 'host', '--volume', mock.ANY, '--volume', - mock.ANY, mock.ANY, 'heat-manage', 'db_sync']) - ] - self.assertEqual(self.check_call.mock_calls, calls) - self.assertTrue(mock_do_restore_db.called) - 
- self.check_call.reset_mock() - mock_do_restore_db.reset_mock() - mock_db_exists.return_value = False - launcher.heat_db_sync(restore_db=False) - calls = [ - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', 'create database heat']), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', "create user if not exists 'heat'@'%' " - "identified by 'heat'"]), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', 'heat', '-e', "grant all privileges on heat.* " - "to 'heat'@'%'"]), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', 'flush privileges;']), - mock.call(['sudo', 'podman', 'run', '--rm', '--user', 'heat', - '--net', 'host', '--volume', mock.ANY, '--volume', - mock.ANY, mock.ANY, 'heat-manage', 'db_sync']) - ] - self.assertEqual(self.check_call.mock_calls, calls) - self.assertFalse(mock_do_restore_db.called) - - @mock.patch('os.unlink') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.untar_file') - @mock.patch('glob.glob') - def test_do_restore_db( - self, mock_glob, mock_untar, mock_unlink): - - launcher = self.get_launcher() - - one = Path(launcher.heat_dir) / 'heat-db-dump-one.tar.bz2' - two = Path(launcher.heat_dir) / 'heat-db-dump-two.tar.bz2' - three = Path(launcher.heat_dir) / 'heat-db-dump-three.tar.bz2' - - now = time.time() - one.touch() - two.touch() - three.touch() - os.utime(str(one), (now, 1000)) - os.utime(str(two), (now, 2000)) - os.utime(str(three), (now, 3000)) - mock_glob.return_value = [str(one), str(two), str(three)] - - def untar(path, dir): - p = Path(path.rstrip('.tar.bz2')) - p.touch() - - mock_untar.side_effect = untar - - mock_open = mock.mock_open() - with mock.patch('builtins.open', mock_open): - # pylint: disable=bad-str-strip-call - launcher.do_restore_db() - self.assertEqual(mock.call(str(three), launcher.heat_dir), - mock_untar.call_args) - self.assertEqual(mock.call(launcher.heat_dir + '/heat-db.sql'), - mock_unlink.call_args) - mock_open.assert_called_with(launcher.heat_dir + '/heat-db.sql') # noqa - self.assertTrue(self.check_call('mysql heat', self.run)) - - mock_unlink.reset_mock() - self.run.reset_mock() - two.touch() - mock_open = mock.mock_open() - with mock.patch('builtins.open', mock_open): - # pylint: disable=bad-str-strip-call - launcher.do_restore_db() - self.assertEqual(mock.call(str(two), launcher.heat_dir), - mock_untar.call_args) - self.assertEqual(mock.call(launcher.heat_dir + '/heat-db.sql'), - mock_unlink.call_args) - mock_open.assert_called_with(launcher.heat_dir + '/heat-db.sql') # noqa - self.assertTrue(self.check_call('mysql heat', self.run)) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.tar_file') - def test_do_backup_db(self, mock_tar): - launcher = self.get_launcher() - p = Path(launcher.heat_dir) / 'heat-db.sql' - p.touch() - self.assertRaises(Exception, launcher.do_backup_db, str(p)) - - p.unlink() - launcher.do_backup_db() - mock_tar.assert_called_with(str(p)) - self.run.assert_called_once_with(['sudo', 'podman', 'exec', '-u', - 'root', 'mysql', 'mysqldump', - 'heat'], - check=True, stdout=mock.ANY) - - def test_pod_exists(self): - launcher = self.get_launcher() - self.check_call.reset_mock() - self.assertTrue(launcher.pod_exists()) - self.check_call.assert_called_once_with(['sudo', 'podman', 'pod', - 'inspect', 'ephemeral-heat'], - stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL) - - self.check_call.reset_mock() - self.check_call.side_effect = subprocess.CalledProcessError(1, 'test') - 
self.assertFalse(launcher.pod_exists()) - self.check_call.assert_called_once_with(['sudo', 'podman', 'pod', - 'inspect', 'ephemeral-heat'], - stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL) - - @mock.patch('os.path.exists') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.tar_file') - @mock.patch( - 'tripleoclient.heat_launcher.HeatPodLauncher._read_heat_config') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.pod_exists') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.do_backup_db') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.database_exists') - def test_rm_heat(self, mock_db_exists, mock_backup_db, mock_pod_exists, - mock_read_heat_config, mock_tar, mock_exists): - - launcher = self.get_launcher() - launcher.log_dir = '/log' - self.check_call.reset_mock() - - mock_db_exists.return_value = True - mock_pod_exists.return_value = True - mock_exists.return_value = True - mock_read_heat_config.return_value = { - 'DEFAULT': { - 'log_file': 'heat-log'}} - launcher.rm_heat() - mock_backup_db.assert_called() - calls = [ - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', 'heat', '-e', 'drop database heat']), - mock.call(['sudo', 'podman', 'exec', '-u', 'root', 'mysql', - 'mysql', '-e', "drop user 'heat'@'%'"]) - ] - self.assertEqual(self.check_call.mock_calls, calls) - mock_pod_exists.assert_called() - self.call.assert_called_once_with(['sudo', 'podman', 'pod', 'rm', '-f', - 'ephemeral-heat']) - mock_read_heat_config.assert_called() - mock_tar.assert_called_with('/log/heat-log') - - mock_backup_db.reset_mock() - self.call.reset_mock() - mock_tar.reset_mock() - mock_db_exists.return_value = False - mock_pod_exists.return_value = False - mock_exists.return_value = False - launcher.rm_heat() - mock_backup_db.assert_not_called() - self.call.assert_not_called() - mock_tar.assert_not_called() - - mock_backup_db.reset_mock() - self.call.reset_mock() - mock_tar.reset_mock() - mock_exists.reset_mock() - mock_db_exists.return_value = False - mock_pod_exists.return_value = True - mock_exists.return_value = True - launcher.rm_heat(backup_db=False) - mock_backup_db.assert_not_called() - self.call.assert_called_once_with(['sudo', 'podman', 'pod', 'rm', '-f', - 'ephemeral-heat']) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.get_pod_state') - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.pod_exists') - def test_stop_heat(self, mock_pod_exists, mock_pod_state): - launcher = self.get_launcher() - self.check_call.reset_mock() - mock_pod_exists.return_value = True - mock_pod_state.return_value = 'Running' - launcher.stop_heat() - mock_pod_exists.assert_called() - mock_pod_state.assert_called() - self.check_call.assert_called_once_with(['sudo', 'podman', 'pod', - 'stop', 'ephemeral-heat']) - - self.check_call.reset_mock() - mock_pod_exists.reset_mock() - mock_pod_state.reset_mock() - mock_pod_state.return_value = 'Exited' - mock_pod_exists.return_value = True - launcher.stop_heat() - mock_pod_exists.assert_called() - mock_pod_state.assert_called() - self.check_call.assert_not_called() - - self.check_call.reset_mock() - mock_pod_exists.reset_mock() - mock_pod_state.reset_mock() - mock_pod_state.return_value = 'Exited' - mock_pod_exists.return_value = False - launcher.stop_heat() - mock_pod_exists.assert_called() - mock_pod_state.assert_not_called() - self.check_call.assert_not_called() - - def test_check_message_bus(self): - launcher = self.get_launcher() - self.check_call.reset_mock() - launcher.check_message_bus() 
- self.check_call.assert_called_once_with(['sudo', 'podman', 'exec', - '-u', 'root', 'rabbitmq', - 'rabbitmqctl', 'list_queues'], - stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL) - - self.check_call.reset_mock() - self.check_call.side_effect = subprocess.CalledProcessError(1, 'test') - self.assertRaises(subprocess.CalledProcessError, - launcher.check_message_bus) - - @mock.patch( - 'tripleoclient.heat_launcher.HeatPodLauncher._get_ctlplane_ip') - def test_check_database(self, mock_ctlplane_ip): - launcher = self.get_launcher() - self.check_call.reset_mock() - - mock_ctlplane_ip.return_value = '1.1.1.1' - self.assertTrue(launcher.check_database()) - mock_ctlplane_ip.assert_called() - self.check_call.assert_called_once_with(['sudo', 'podman', 'exec', - '-u', 'root', 'mysql', - 'mysql', '-h', '1.1.1.1', - '-e', 'show databases;'], - stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL) - - self.check_call.reset_mock() - mock_ctlplane_ip.reset_mock() - self.check_call.side_effect = subprocess.CalledProcessError(1, '/test') - self.assertRaises(subprocess.CalledProcessError, - launcher.check_database) - - def test_database_exists(self): - launcher = self.get_launcher() - self.check_output.reset_mock() - self.check_output.return_value = 'heat' - self.assertTrue(launcher.database_exists()) - self.check_output.assert_called_once_with([ - 'sudo', 'podman', 'exec', '-u', 'root', 'mysql', 'mysql', '-e', - 'show databases like "heat"']) - - self.check_output.reset_mock() - self.check_output.return_value = 'nova' - self.assertFalse(launcher.database_exists()) - self.check_output.assert_called_once_with([ - 'sudo', 'podman', 'exec', '-u', 'root', 'mysql', 'mysql', '-e', - 'show databases like "heat"']) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher.pod_exists') - def test_kill_heat(self, mock_pod_exists): - launcher = self.get_launcher() - self.check_output.reset_mock() - mock_pod_exists.return_value = True - launcher.kill_heat(0) - self.call.assert_called_once_with(['sudo', 'podman', 'pod', 'kill', - 'ephemeral-heat']) - mock_pod_exists.assert_called() - - mock_pod_exists.reset_mock() - self.call.reset_mock() - mock_pod_exists.return_value = False - launcher.kill_heat(0) - mock_pod_exists.assert_called() - self.call.assert_not_called() - - def test_decode(self): - launcher = self.get_launcher() - mock_encoded = mock.Mock() - mock_decoded = mock.Mock() - mock_encoded.decode.return_value = mock_decoded - mock_decoded.endswith.return_value = False - launcher._decode(mock_encoded) - mock_encoded.decode.assert_called_with('utf-8') - - self.assertEqual('test', launcher._decode(b'test\n')) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher._decode') - def test_get_transport_url(self, mock_decode): - launcher = self.get_launcher() - mock_decode.side_effect = ['user', 'password', 'fqdn_ctlplane', 'port'] - self.assertEqual("rabbit://user:password@fqdn_ctlplane:port/?ssl=0", - launcher._get_transport_url()) - - @mock.patch( - 'tripleoclient.heat_launcher.HeatPodLauncher._get_ctlplane_vip') - def test_get_db_connection(self, mock_ctlplane_vip): - launcher = self.get_launcher() - mock_ctlplane_vip.return_value = '1.1.1.1' - self.assertEqual( - 'mysql+pymysql://' - 'heat:heat@1.1.1.1/heat?read_default_file=' - '/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo', - launcher._get_db_connection()) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher._decode') - def test_get_ctlplane_vip(self, mock_decode): - launcher = self.get_launcher() - self.check_output.reset_mock() - 
self.check_output.return_value = '1.1.1.1' - launcher._get_ctlplane_vip() - self.check_output.assert_called_once_with(['sudo', 'hiera', - 'controller_virtual_ip']) - mock_decode.assert_called_with('1.1.1.1') - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher._decode') - def test_get_ctlplane_ip(self, mock_decode): - launcher = self.get_launcher() - self.check_output.reset_mock() - self.check_output.return_value = '1.1.1.1' - launcher._get_ctlplane_ip() - self.check_output.assert_called_once_with(['sudo', 'hiera', - 'ctlplane']) - mock_decode.assert_called_with('1.1.1.1') - - @mock.patch('multiprocessing.cpu_count') - def test_get_num_engine_workers(self, mock_cpu_count): - launcher = self.get_launcher() - mock_cpu_count.return_value = 4 - self.assertEqual(2, launcher._get_num_engine_workers()) - - def test_wait_for_message_queue(self): - launcher = self.get_launcher() - wait_mq = launcher.wait_for_message_queue.__wrapped__ - self.check_output.return_value = 'engine.ephemeral-heat' - wait_mq(launcher) - - self.check_output.reset_mock() - self.check_output.return_value = 'heat-listener' - self.assertRaises(HeatPodMessageQueueException, wait_mq, launcher) - - def test_get_log_file_path(self): - launcher = self.get_launcher() - launcher.timestamp = '1111' - self.assertEqual('heat-1111.log', launcher._get_log_file_path()) - - @mock.patch('configparser.ConfigParser') - def test_read_heat_config(self, mock_config_parser): - launcher = self.get_launcher() - mock_cp = mock.Mock() - mock_cp.read.return_value = 'test' - mock_config_parser.return_value = mock_cp - self.assertEqual(mock_cp, launcher._read_heat_config()) - mock_config_parser.assert_called() - mock_cp.read.assert_called_with(launcher.config_file) - - @mock.patch('tripleoclient.heat_launcher.' 
- 'HeatPodLauncher._get_num_engine_workers') - @mock.patch( - 'tripleoclient.heat_launcher.HeatPodLauncher._get_db_connection') - @mock.patch( - 'tripleoclient.heat_launcher.HeatPodLauncher._get_transport_url') - def test_write_heat_config(self, mock_get_transport_url, mock_get_db_conn, - mock_num_engine_workers): - launcher = self.get_launcher() - launcher.api_port = '1234' - launcher.log_file = '/log/heat' - mock_get_transport_url.return_value = 'transport-url' - mock_get_db_conn.return_value = 'db-connection' - mock_num_engine_workers.return_value = 'num-engine-workers' - launcher._write_heat_config() - with open(launcher.config_file) as f: - config = f.read() - self.assertIn('num_engine_workers = num-engine-workers\n', config) - self.assertIn('connection = db-connection\n', config) - self.assertIn('transport_url=transport-url\n', config) - self.assertIn('bind_port = 1234\n', config) - self.assertIn('log_file = /log/heat\n', config) - - def test_write_heat_pod(self): - launcher = self.get_launcher() - launcher.install_dir = 'install-dir' - launcher.api_container_image = 'api-image' - launcher.engine_container_image = 'engine-image' - launcher._write_heat_pod() - pod_yaml_path = Path(launcher.heat_dir) / 'heat-pod.yaml' - with pod_yaml_path.open() as f: - pod = f.read() - self.assertIn('image: api-image', pod) - self.assertIn('image: engine-image', pod) - - -class TestHeatPodLauncherUtils(base.TestCase): - def setUp(self): - super(TestHeatPodLauncherUtils, self).setUp() - - def test_rm_heat(self): - launcher = mock.Mock() - utils.rm_heat(launcher) - launcher.rm_heat.assert_called_once_with(True) - launcher.reset_mock() - utils.rm_heat(launcher, False) - launcher.rm_heat.assert_called_once_with(False) - launcher.reset_mock() - utils.rm_heat(launcher) - launcher.rm_heat.assert_called_once_with(True) - - def test_kill_heat(self): - launcher = mock.Mock() - utils.kill_heat(launcher) - launcher.kill_heat.assert_called_once_with(None) - launcher.reset_mock() - utils._heat_pid = 111 - utils.kill_heat(launcher) - launcher.kill_heat.assert_called_once_with(111) - launcher.reset_mock() - utils.kill_heat(launcher) - launcher.kill_heat.assert_called_once_with(111) - launcher.reset_mock() - utils.kill_heat(launcher) - launcher.kill_heat.assert_called_once_with(111) - - @mock.patch('tripleoclient.heat_launcher.HeatPodLauncher') - @mock.patch('tripleoclient.heat_launcher.HeatNativeLauncher') - @mock.patch('tripleoclient.heat_launcher.HeatContainerLauncher') - def test_get_heat_launcher(self, mock_container, mock_native, mock_pod): - utils.get_heat_launcher('pod', 1, 2, 3, a='a', b='b', c='c') - mock_pod.assert_called_once_with(1, 2, 3, a='a', b='b', c='c') - utils.get_heat_launcher('native', 1, 2, 3, a='a', b='b', c='c') - mock_native.assert_called_once_with(1, 2, 3, a='a', b='b', c='c') - utils.get_heat_launcher('container', 1, 2, 3, a='a', b='b', c='c') - mock_container.assert_called_once_with(1, 2, 3, a='a', b='b', c='c') - - def test_heat_api_port(self): - test_port = utils.test_heat_api_port.__wrapped__ - mock_socket = mock.Mock() - host = '1.1.1.1' - port = 1234 - test_port(mock_socket, host, port) - mock_socket.connect.assert_called_once_with((host, port)) - - @mock.patch('tripleoclient.utils.test_heat_api_port') - @mock.patch('tripleo_common.utils.heat.local_orchestration_client') - @mock.patch('socket.socket') - @mock.patch('tripleoclient.utils.get_heat_launcher') - def test_launch_heat(self, mock_get_heat_launcher, mock_socket, - mock_local_client, mock_test_port): - 
utils._local_orchestration_client = 'client' - self.assertEqual('client', utils.launch_heat()) - mock_get_heat_launcher.assert_not_called() - - utils._local_orchestration_client = None - mock_launcher = mock.Mock() - mock_launcher.api_port = 1234 - mock_launcher.heat_type = 'pod' - mock_get_heat_launcher.return_value = mock_launcher - mock_socket.return_value = 'socket' - utils.launch_heat() - mock_get_heat_launcher.assert_called_once() - mock_launcher.check_database.assert_called_once_with() - mock_launcher.check_message_bus.assert_called_once_with() - mock_launcher.heat_db_sync.assert_called_once_with(False) - mock_launcher.launch_heat.assert_called_once_with() - mock_test_port.assert_called_once_with( - 'socket', mock_launcher.host, - int(mock_launcher.api_port)) - mock_launcher.wait_for_message_queue.assert_called_once_with() - mock_local_client.assert_called_once_with( - mock_launcher.host, - mock_launcher.api_port) - - -class TestHeatNativeLauncher(base.TestCase): - def setUp(self): - super(TestHeatNativeLauncher, self).setUp() - self.run = mock.patch('subprocess.run').start() - self.popen = mock.patch('subprocess.Popen').start() - self.mock_popen = mock.Mock() - self.mock_popen.communicate.return_value = ("", "") - self.popen.return_value = self.mock_popen - self.getpwnam = mock.patch('pwd.getpwnam').start() - self.getgrnam = mock.patch('grp.getgrnam').start() - self.chown = mock.patch('os.chown').start() - - self.templates_dir = mock.patch( - 'tripleoclient.heat_launcher.DEFAULT_TEMPLATES_DIR', - os.path.join(os.path.dirname(__file__), - '..', '..', 'templates')).start() - self.heat_dir = self.useFixture(fixtures.TempDir()).path - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - - self.addCleanup(mock.patch.stopall) - - def get_launcher(self, **kwargs): - return heat_launcher.HeatNativeLauncher( - heat_dir=self.heat_dir, - use_tmp_dir=True, - use_root=True, - **kwargs) - - def test_heat_dir_no_exist(self): - shutil.rmtree(self.heat_dir) - launcher = self.get_launcher() - self.assertNotEqual(self.heat_dir, launcher.install_dir) - - @mock.patch('tempfile.mkdtemp') - def test_get_launcher(self, mock_mkdtemp): - mock_mkdtemp.return_value = self.tmp_dir - - def test_install_dir(): - mock_mkdtemp.assert_called() - return ("", "") - - # Test that tempfile.mkdtemp is called before the tmpfs is setup, - # so that the tmpfs mount is created at the temp dir. - self.mock_popen.communicate.side_effect = test_install_dir - self.get_launcher() - self.assertEqual(['mount', '-t', 'tmpfs'], - self.popen.call_args_list[1][0][0][0:3]) diff --git a/tripleoclient/tests/test_overcloud_credentials.py b/tripleoclient/tests/test_overcloud_credentials.py deleted file mode 100644 index f7b6cc431..000000000 --- a/tripleoclient/tests/test_overcloud_credentials.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
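test_get_launcher in TestHeatNativeLauncher above pins down call ordering by asserting from inside a side_effect: the mkdtemp check runs while communicate() is executing, so a violation fails at the exact call site rather than after the fact. The same trick in isolation, with all names illustrative:

from unittest import mock


def test_ordering():
    first = mock.Mock()
    second = mock.Mock()

    def during_second(*args, **kwargs):
        # Runs while second() executes: if first() has not happened yet,
        # the AssertionError pinpoints the ordering violation.
        first.assert_called()
        return ('', '')

    second.side_effect = during_second
    first()
    second()


test_ordering()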
-import shutil -import tempfile -from unittest import mock - -from tripleoclient.tests.v1 import test_plugin -from tripleoclient.v1 import overcloud_credentials - - -class TestOvercloudCredentials(test_plugin.TestPluginV1): - - def setUp(self): - super(TestOvercloudCredentials, self).setUp() - - self.cmd = overcloud_credentials.OvercloudCredentials(self.app, None) - self.tripleoclient = mock.Mock() - self.app.client_manager.tripleoclient = self.tripleoclient - - @mock.patch("tripleoclient.utils.run_ansible_playbook", autospec=True) - def test_ok(self, mock_run_playbook): - arglist = ['overcloud', ] - verifylist = [ - ('stack', 'overcloud'), - ('directory', '.') - ] - - self.check_parser(self.cmd, arglist, verifylist) - - @mock.patch("tripleoclient.utils.run_ansible_playbook", autospec=True) - def test_okay_custom_dir(self, mock_run_playbook): - - temp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, temp) - - arglist = ['overcloud', '--directory', temp] - verifylist = [ - ('stack', 'overcloud'), - ('directory', temp) - ] - self.check_parser(self.cmd, arglist, verifylist) diff --git a/tripleoclient/tests/test_plugin.py b/tripleoclient/tests/test_plugin.py deleted file mode 100644 index ab44ae334..000000000 --- a/tripleoclient/tests/test_plugin.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from tripleoclient import plugin -from tripleoclient.tests import base -from tripleoclient.tests import fakes - - -class TestPlugin(base.TestCase): - - def test_make_client(self): - clientmgr = mock.MagicMock() - clientmgr.get_endpoint_for_service_type.return_value = fakes.WS_URL - - clientmgr.auth.get_token.return_value = "TOKEN" - clientmgr.auth_ref.project_id = "ID" - clientmgr.cacert = None - - plugin.make_client(clientmgr) - - # And the functions should only be called when the client is created: - self.assertEqual(clientmgr.auth.get_token.call_count, 0) - self.assertEqual(clientmgr.get_endpoint_for_service_type.call_count, 0) diff --git a/tripleoclient/tests/test_utils.py b/tripleoclient/tests/test_utils.py deleted file mode 100644 index 40e61b5d4..000000000 --- a/tripleoclient/tests/test_utils.py +++ /dev/null @@ -1,2853 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
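test_make_client in test_plugin.py above asserts laziness by checking call_count == 0 immediately after construction: creating the client must not hit the token or service-catalog APIs. A sketch of the property-based deferral that satisfies such a test; LazyClient and the 'workflowv2' service type are illustrative assumptions:

from unittest import mock


class LazyClient(object):
    """Defer expensive lookups until an attribute is first used."""

    def __init__(self, clientmgr):
        self._clientmgr = clientmgr
        self._endpoint = None

    @property
    def endpoint(self):
        if self._endpoint is None:
            self._endpoint = self._clientmgr.get_endpoint_for_service_type(
                'workflowv2')
        return self._endpoint


clientmgr = mock.MagicMock()
client = LazyClient(clientmgr)
# Construction alone must not touch the service catalog...
assert clientmgr.get_endpoint_for_service_type.call_count == 0
# ...but first use does, exactly once.
client.endpoint
assert clientmgr.get_endpoint_for_service_type.call_count == 1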
-# - - -import ansible_runner -import datetime -import errno -import fixtures -import logging -import openstack -import os -import os.path -import shutil -import socket -import subprocess -import tempfile -from unittest import mock - -import sys - -from heatclient import exc as hc_exc - -from uuid import uuid4 - -from testscenarios import TestWithScenarios -from unittest import TestCase -import yaml - -from tripleoclient import exceptions -from tripleoclient import utils - -from tripleoclient.tests import base -from tripleoclient.tests import fakes - -from configparser import ConfigParser -from urllib import error as url_error - -from ansible_runner import Runner - - -class TestRunAnsiblePlaybook(TestCase): - def setUp(self): - self.unlink_patch = mock.patch('os.unlink') - self.addCleanup(self.unlink_patch.stop) - self.unlink_patch.start() - self.mock_log = mock.Mock('logging.getLogger') - self.ansible_playbook_cmd = "ansible-playbook" - self.orig_workdir = utils.constants.DEFAULT_WORK_DIR - utils.constants.DEFAULT_WORK_DIR = utils.TempDirs().dir - utils.makedirs( - os.path.join( - utils.constants.DEFAULT_WORK_DIR, - 'overcloud' - ) - ) - ansible_runner.Runner.stdout = mock.MagicMock() - ansible_runner.Runner.stdout.read = mock.MagicMock(return_value='') - - def tearDown(self): - utils.constants.DEFAULT_WORK_DIR = self.orig_workdir - - @mock.patch('os.makedirs') - @mock.patch('os.path.exists', return_value=False) - @mock.patch('tripleoclient.utils.run_command_and_log') - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_no_playbook(self, mock_dump_artifact, mock_run, mock_exists, - mock_mkdir): - self.assertRaises( - RuntimeError, - utils.run_ansible_playbook, - 'non-existing.yaml', - 'localhost,', - utils.constants.DEFAULT_WORK_DIR - ) - mock_exists.assert_called_with(os.path.join( - utils.constants.DEFAULT_WORK_DIR, 'non-existing.yaml')) - mock_run.assert_not_called() - - @mock.patch('tempfile.mkstemp', return_value=('foo', os.path.join( - utils.constants.DEFAULT_WORK_DIR, 'fooBar.cfg'))) - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=1) - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_subprocess_error(self, mock_dump_artifact, - mock_run, mock_mkdirs, mock_exists, - mock_mkstemp): - with self.assertRaises(RuntimeError): - utils.run_ansible_playbook( - 'existing.yaml', - 'localhost,', - utils.constants.DEFAULT_WORK_DIR - ) - - @mock.patch('tempfile.mkstemp', return_value=('foo', os.path.join( - utils.constants.DEFAULT_WORK_DIR, 'fooBar.cfg'))) - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return() - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_run_success_default(self, mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists, mock_mkstemp): - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR - ) - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return() - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - 
return_value="/foo/inventory.yaml") - def test_run_success_ansible_cfg(self, mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists): - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR - ) - - @mock.patch('tempfile.mkstemp', return_value=('foo', os.path.join( - utils.constants.DEFAULT_WORK_DIR, 'fooBar.cfg'))) - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.makedirs') - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return() - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_run_success_connection_local(self, mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists, - mock_mkstemp): - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR, - connection='local' - ) - - @mock.patch('os.makedirs', return_value=None) - @mock.patch('tempfile.mkstemp', return_value=('foo', os.path.join( - utils.constants.DEFAULT_WORK_DIR, 'fooBar.cfg'))) - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return() - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_run_success_gathering_policy(self, mock_dump_artifact, mock_run, - mock_exists, mock_mkstemp, - mock_makedirs): - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR, - connection='local', - gathering_policy='smart' - ) - - @mock.patch('os.makedirs', return_value=None) - @mock.patch('tempfile.mkstemp', return_value=('foo', os.path.join( - utils.constants.DEFAULT_WORK_DIR, 'fooBar.cfg'))) - @mock.patch('os.path.exists', return_value=True) - @mock.patch.object( - Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return() - ) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_run_success_extra_vars(self, mock_dump_artifact, mock_run, - mock_exists, mock_mkstemp, mock_makedirs): - arglist = { - 'var_one': 'val_one', - } - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR, - connection='local', - gathering_policy='smart', - extra_vars=arglist - ) - - @mock.patch('os.chmod') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.utils.makedirs') - @mock.patch('os.path.exists', side_effect=(False, True, True)) - def test_run_with_timeout(self, mock_exists, mock_mkdir, mock_open, - mock_chmod): - ansible_runner.ArtifactLoader = mock.MagicMock() - ansible_runner.Runner.run = mock.MagicMock(return_value=('', 0)) - ansible_runner.runner_config = mock.MagicMock() - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR, - timeout=42 - ) - self.assertIn(mock.call(os.path.join(utils.constants.DEFAULT_WORK_DIR, - 'env/settings'), 'w'), - mock_open.mock_calls) - self.assertIn( - mock.call().__enter__().write('job_timeout: 2520\n'), # 42m * 60 - mock_open.mock_calls) - - @mock.patch('os.chmod') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.utils.makedirs') - @mock.patch('os.path.exists', side_effect=(False, True, True)) - def test_run_with_extravar_file(self, mock_exists, mock_mkdir, mock_open, - mock_chmod): - ansible_runner.ArtifactLoader 
= mock.MagicMock() - ansible_runner.Runner.run = mock.MagicMock(return_value=('', 0)) - ansible_runner.runner_config = mock.MagicMock() - utils.run_ansible_playbook( - playbook='existing.yaml', - inventory='localhost,', - workdir=utils.constants.DEFAULT_WORK_DIR, - extra_vars_file={ - 'foo': 'bar', - 'things': { - 'more': 'options' - }, - 'num': 42 - } - ) - self.assertIn( - mock.call(os.path.join(utils.constants.DEFAULT_WORK_DIR, - 'env/extravars'), 'w'), - mock_open.mock_calls - ) - self.assertIn( - mock.call().__enter__().write( - 'foo: bar\nnum: 42\nthings:\n more: options\n' - ), - mock_open.mock_calls - ) - - -class TestRunRolePlaybooks(TestCase): - def setUp(self): - tmp_dir = utils.TempDirs().dir - self.work_dir = os.path.join(tmp_dir, 'working_dir') - utils.makedirs(self.work_dir) - self.inventory_path = os.path.join( - self.work_dir, 'tripleo-ansible-inventory.yaml') - with open(self.inventory_path, 'w') as f: - f.write('{}') - - self.cmd = mock.Mock() - self.cmd.app.options.debug = False - self.cmd.app_args.verbose_level = 0 - - @mock.patch('tripleoclient.utils.run_ansible_playbook') - def test_network_config(self, mock_run): - roles = [ - {'count': 10, 'name': 'Compute'}, - {'count': 3, 'name': 'Controller'} - ] - utils.run_role_playbooks(self.cmd, self.work_dir, self.work_dir, - roles, True) - - self.assertEqual(3, mock_run.call_count) - mock_run.assert_has_calls([ - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Compute', - extra_vars={} - ), - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Controller', - extra_vars={} - ), - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-network-config.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts=None, - extra_vars={} - ) - ]) - - @mock.patch('tripleoclient.utils.run_ansible_playbook') - def test_no_network_config(self, mock_run): - roles = [ - {'count': 10, 'name': 'Compute'}, - {'count': 3, 'name': 'Controller'} - ] - utils.run_role_playbooks(self.cmd, self.work_dir, self.work_dir, - roles, False) - - self.assertEqual(2, mock_run.call_count) - mock_run.assert_has_calls([ - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Compute', - extra_vars={} - ), - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Controller', - extra_vars={} - ) - ]) - - @mock.patch('tripleoclient.utils.run_ansible_playbook') - def test_override_growvols(self, mock_run): - roles = [ - {'count': 10, 'name': 'Compute'}, - { - 'count': 3, - 'name': 'Controller', - 'ansible_playbooks': [ - { - 'playbook': '/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - 'extra_vars': { - 'growvols_args': '/var=50% /srv=50%' - } - } - ] - } - ] - utils.run_role_playbooks(self.cmd, self.work_dir, self.work_dir, - roles, False) - - self.assertEqual(2, 
mock_run.call_count) - mock_run.assert_has_calls([ - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Compute', - extra_vars={} - ), - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Controller', - extra_vars={'growvols_args': '/var=50% /srv=50%'} - ) - ]) - - @mock.patch('tripleoclient.utils.run_ansible_playbook') - def test_role_playbooks(self, mock_run): - roles = [ - # No playbooks should execute for the role if count is 0. - {'count': 0, 'name': 'ZeroNodesRole'}, - {'count': 10, 'name': 'Compute'}, - { - 'count': 3, - 'name': 'Controller', - 'ansible_playbooks': [ - { - 'playbook': 'the_thing.yaml' - }, - { - 'playbook': '/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - 'extra_vars': { - 'growvols_args': '/var=50% /srv=50%' - } - }, - { - 'playbook': 'the_other_thing.yaml' - }, - ] - } - ] - utils.run_role_playbooks(self.cmd, self.work_dir, self.work_dir, - roles, True) - - self.assertEqual(5, mock_run.call_count) - mock_run.assert_has_calls([ - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Compute', - extra_vars={} - ), - mock.call( - playbook=os.path.join(self.work_dir, 'the_thing.yaml'), - inventory={}, - workdir=mock.ANY, - playbook_dir=self.work_dir, - verbosity=0, - limit_hosts='Controller', - extra_vars={} - ), - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-growvols.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts='Controller', - extra_vars={'growvols_args': '/var=50% /srv=50%'} - ), - mock.call( - playbook=os.path.join(self.work_dir, 'the_other_thing.yaml'), - inventory={}, - workdir=mock.ANY, - playbook_dir=self.work_dir, - verbosity=0, - limit_hosts='Controller', - extra_vars={} - ), - mock.call( - playbook='/usr/share/ansible/tripleo-playbooks/' - 'cli-overcloud-node-network-config.yaml', - inventory={}, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=0, - limit_hosts=None, - extra_vars={} - ) - ]) - - -class TestRunCommandAndLog(TestCase): - def setUp(self): - self.mock_logger = mock.Mock(spec=logging.Logger) - - self.mock_process = mock.Mock() - self.mock_process.stdout.readline.side_effect = ['foo\n', 'bar\n'] - self.mock_process.wait.side_effect = [0] - self.mock_process.returncode = 0 - - mock_sub = mock.patch('subprocess.Popen', - return_value=self.mock_process) - self.mock_popen = mock_sub.start() - self.addCleanup(mock_sub.stop) - - self.cmd = ['exit', '0'] - self.e_cmd = ['exit', '1'] - self.log_calls = [mock.call('foo'), - mock.call('bar')] - - def test_success_default(self): - retcode = utils.run_command_and_log(self.mock_logger, self.cmd) - self.mock_popen.assert_called_once_with(self.cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, - cwd=None, env=None) - self.assertEqual(retcode, 0) - self.mock_logger.warning.assert_has_calls(self.log_calls, - any_order=False) - - @mock.patch('subprocess.Popen') - def test_error_subprocess(self, mock_popen): - 
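The TestRunCommandAndLog cases here pin down the contract of utils.run_command_and_log: stderr is merged into stdout, each output line is surfaced through the passed-in logger at warning level, and the subprocess return code is handed back. A minimal sketch consistent with those assertions (the readline loop shape and universal_newlines are assumptions, not the real implementation):

import subprocess


def run_command_and_log(log, cmd, cwd=None, env=None):
    # Sketch: merge stderr into stdout so one stream can be logged.
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               shell=False, cwd=cwd, env=env,
                               universal_newlines=True)
    line = process.stdout.readline()
    while line:
        # Each line of subprocess output goes to the caller's logger.
        log.warning(line.rstrip('\n'))
        line = process.stdout.readline()
    process.wait()
    return process.returncode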
mock_process = mock.Mock() - mock_process.stdout.readline.side_effect = ['Error\n'] - mock_process.wait.side_effect = [1] - mock_process.returncode = 1 - - mock_popen.return_value = mock_process - - retcode = utils.run_command_and_log(self.mock_logger, self.e_cmd) - mock_popen.assert_called_once_with(self.e_cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, cwd=None, - env=None) - - self.assertEqual(retcode, 1) - self.mock_logger.warning.assert_called_once_with('Error') - - def test_success_env(self): - test_env = os.environ.copy() - retcode = utils.run_command_and_log(self.mock_logger, self.cmd, - env=test_env) - self.mock_popen.assert_called_once_with(self.cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, - cwd=None, env=test_env) - self.assertEqual(retcode, 0) - self.mock_logger.warning.assert_has_calls(self.log_calls, - any_order=False) - - def test_success_cwd(self): - test_cwd = '/usr/local/bin' - retcode = utils.run_command_and_log(self.mock_logger, self.cmd, - cwd=test_cwd) - self.mock_popen.assert_called_once_with(self.cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=False, - cwd=test_cwd, env=None) - self.assertEqual(retcode, 0) - self.mock_logger.warning.assert_has_calls(self.log_calls, - any_order=False) - - -class TestWaitForStackUtil(TestCase): - def setUp(self): - self.mock_orchestration = mock.Mock() - sleep_patch = mock.patch('time.sleep') - self.addCleanup(sleep_patch.stop) - sleep_patch.start() - - def mock_event(self, resource_name, id, resource_status_reason, - resource_status, event_time): - e = mock.Mock() - e.resource_name = resource_name - e.id = id - e.resource_status_reason = resource_status_reason - e.resource_status = resource_status - e.event_time = event_time - return e - - @mock.patch("heatclient.common.event_utils.get_events") - def test_wait_for_stack_ready(self, mock_el): - stack = mock.Mock() - stack.stack_name = 'stack' - stack.stack_status = "CREATE_COMPLETE" - self.mock_orchestration.stacks.get.return_value = stack - - complete = utils.wait_for_stack_ready(self.mock_orchestration, 'stack') - self.assertTrue(complete) - - @mock.patch("time.sleep") - @mock.patch("heatclient.common.event_utils.poll_for_events") - @mock.patch("tripleoclient.utils.get_stack") - def test_wait_for_stack_ready_retry(self, mock_get_stack, mock_poll, - mock_time): - stack = mock.Mock() - stack.stack_name = 'stack' - stack.stack_id = 'id' - stack.stack_status = "CREATE_COMPLETE" - mock_get_stack.return_value = stack - mock_poll.side_effect = [hc_exc.HTTPException(code=504), - ("CREATE_COMPLETE", "ready retry message")] - - complete = utils.wait_for_stack_ready(self.mock_orchestration, 'stack') - self.assertTrue(complete) - - @mock.patch("time.sleep") - @mock.patch("heatclient.common.event_utils.poll_for_events") - @mock.patch("tripleoclient.utils.get_stack") - def test_wait_for_stack_ready_retry_fail(self, mock_get_stack, mock_poll, - mock_time): - stack = mock.Mock() - stack.stack_name = 'stack' - stack.stack_id = 'id' - stack.stack_status = "CREATE_COMPLETE" - mock_get_stack.return_value = stack - mock_poll.side_effect = hc_exc.HTTPException(code=504) - - self.assertRaises(RuntimeError, - utils.wait_for_stack_ready, - self.mock_orchestration, 'stack') - - @mock.patch("time.sleep") - @mock.patch("heatclient.common.event_utils.poll_for_events") - @mock.patch("tripleoclient.utils.get_stack") - def test_wait_for_stack_ready_server_fail(self, mock_get_stack, mock_poll, - mock_time): - stack = mock.Mock() - 
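The retry cases in TestWaitForStackUtil document the error handling around heatclient's poll_for_events: a transient HTTP 504 is retried, while a persistent 504 or a server-side 500 surfaces as RuntimeError. A rough sketch of that loop; the retry budget, helper name, and poll_for_events argument shape are assumptions:

from heatclient.common import event_utils
from heatclient import exc as hc_exc


def _poll_stack_events(orchestration_client, stack_name, retries=5):
    # Sketch: retry transient gateway timeouts, fail hard on anything else.
    for attempt in range(retries):
        try:
            return event_utils.poll_for_events(orchestration_client,
                                               stack_name)
        except hc_exc.HTTPException as e:
            if e.code != 504 or attempt == retries - 1:
                raise RuntimeError('Error occurred while waiting for '
                                   'stack to be ready: %s' % e)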
stack.stack_name = 'stack' - stack.stack_id = 'id' - stack.stack_status = "CREATE_COMPLETE" - mock_get_stack.return_value = stack - mock_poll.side_effect = hc_exc.HTTPException(code=500) - - self.assertRaises(RuntimeError, - utils.wait_for_stack_ready, - self.mock_orchestration, 'stack') - - def test_wait_for_stack_ready_no_stack(self): - self.mock_orchestration.stacks.get.return_value = None - - complete = utils.wait_for_stack_ready(self.mock_orchestration, 'stack') - - self.assertFalse(complete) - - @mock.patch("heatclient.common.event_utils.get_events") - def test_wait_for_stack_ready_failed(self, mock_el): - stack = mock.Mock() - stack.stack_name = 'stack' - stack.stack_status = "CREATE_FAILED" - self.mock_orchestration.stacks.get.return_value = stack - - complete = utils.wait_for_stack_ready(self.mock_orchestration, 'stack') - - self.assertFalse(complete) - - @mock.patch("heatclient.common.event_utils.poll_for_events") - def test_wait_for_stack_in_progress(self, mock_poll_for_events): - - mock_poll_for_events.return_value = ("CREATE_IN_PROGRESS", "MESSAGE") - - stack = mock.Mock() - stack.stack_name = 'stack' - stack.stack_status = 'CREATE_IN_PROGRESS' - self.mock_orchestration.stacks.get.return_value = stack - - result = utils.wait_for_stack_ready(self.mock_orchestration, 'stack') - self.assertEqual(False, result) - - def test_check_service_vips_migrated_to_service(self): - env_reg = { - 'OS::TripleO::Network::Ports::RedisVipPort': 'val', - 'OS::TripleO::Network::Ports::OVNDBsVipPort': 'val', - } - env = { - 'resource_registry': env_reg - } - - self.assertRaises(exceptions.InvalidConfiguration, - utils.check_service_vips_migrated_to_service, - env) - - def test_check_ceph_fsid_matches_env_files(self): - stack_params = { - 'CephClusterFSID': 'ceph_fsid_val', - 'key1': 'val1', - 'key2': 'val2', - } - mock_stack = mock.MagicMock() - mock_stack.environment = mock.MagicMock() - mock_stack.environment.return_value = { - 'parameter_defaults': stack_params - } - provided_env = { - 'parameter_defaults': { - 'CephClusterFSID': mock_stack.environment() - .get('parameter_defaults', {}) - .get('CephClusterFSID', False), - 'key1': 'val1', - 'key2': 'val2', - } - } - utils.check_ceph_fsid_matches_env_files(mock_stack.environment(), - provided_env) - - def test_check_ceph_fsid_matches_env_files_fail(self): - stack_params = { - 'CephClusterFSID': 'ceph_fsid_val', - 'key1': 'val1', - 'key2': 'val2', - } - provided_env = { - 'parameter_defaults': { - 'CephClusterFSID': 'new_or_wrong_fsid_val', - 'key1': 'val1', - 'key2': 'val2', - } - } - mock_stack = mock.MagicMock() - mock_stack.environment = mock.MagicMock() - mock_stack.environment.return_value = { - 'parameter_defaults': stack_params - } - with self.assertRaises(exceptions.InvalidConfiguration): - utils.check_ceph_fsid_matches_env_files(mock_stack.environment(), - provided_env) - - def test_check_ceph_ansible(self): - res_reg = { - 'resource_registry': { - 'OS::Tripleo::Services::CephMon': '/path/to/ceph-ansible.yml', - } - } - - utils.check_ceph_ansible(res_reg.get('resource_registry', {}), - 'UpgradePrepare') - utils.check_ceph_ansible(res_reg.get('resource_registry', {}), - 'UpgradeConverge') - - def test_check_ceph_ansible_fail(self): - res_reg = { - 'resource_registry': { - 'OS::Tripleo::Services::CephMon': '/path/to/ceph-ansible.yml', - } - } - - with self.assertRaises(exceptions.InvalidConfiguration): - utils.check_ceph_ansible(res_reg.get('resource_registry', {}), - 'DeployOvercloud') - - def test_check_deployed_ceph_stage(self): - - env = 
{ - 'resource_registry': { - 'OS::Tripleo::Services::CephMon': '/path/cephadm/ceph-mon.yml', - 'OS::TripleO::Services::CephMgr': '/path/cephadm/ceph-mgr.yml', - 'OS::TripleO::Services::CephMon': '/path/cephadm/ceph-mon.yml', - 'OS::TripleO::Services::CephOSD': '/path/cephadm/ceph-osd.yml', - 'OS::TripleO::Services::CephMds': '/path/cephadm/ceph-mds.yml', - 'OS::TripleO::Services::CephNfs': '/path/cephadm/ceph-nfs.yml', - 'OS::TripleO::Services::CephRgw': '/path/cephadm/ceph-rgw.yml', - }, - 'parameter_defaults': { - 'DeployedCeph': True - } - } - - utils.check_deployed_ceph_stage(env) - - def test_check_deployed_ceph_stage_fail(self): - - env = { - 'resource_registry': { - 'OS::Tripleo::Services::CephMon': '/path/cephadm/ceph-mon.yml', - 'OS::TripleO::Services::CephMgr': '/path/cephadm/ceph-mgr.yml', - 'OS::TripleO::Services::CephMon': '/path/cephadm/ceph-mon.yml', - 'OS::TripleO::Services::CephOSD': '/path/cephadm/ceph-osd.yml', - 'OS::TripleO::Services::CephMds': '/path/cephadm/ceph-mds.yml', - 'OS::TripleO::Services::CephNfs': '/path/cephadm/ceph-nfs.yml', - 'OS::TripleO::Services::CephRgw': '/path/cephadm/ceph-rgw.yml', - }, - 'parameter_defaults': { - 'DeployedCeph': False - } - } - - with self.assertRaises(exceptions.InvalidConfiguration): - utils.check_deployed_ceph_stage(env) - - def test_check_deployed_ceph_stage_external(self): - - env = { - 'resource_registry': { - 'OS::Tripleo::Services::CephExternal': '/path/cephadm/ceph-client.yml', # noqa E501 - }, - 'parameter_defaults': { - 'DeployedCeph': False - } - } - - with self.assertRaises(exceptions.InvalidConfiguration): - utils.check_deployed_ceph_stage(env) - - def test_check_swift_and_rgw(self): - stack_reg = { - 'OS::TripleO::Services::SwiftProxy': 'OS::Heat::None', - } - env_reg = { - 'OS::TripleO::Services::CephRgw': 'val', - } - mock_stack = mock.MagicMock() - mock_stack.environment = mock.MagicMock() - mock_stack.environment.return_value = { - 'resource_registry': stack_reg, - } - env = { - 'resource_registry': env_reg, - } - - utils.check_swift_and_rgw(mock_stack.environment(), - env, 'UpgradePrepare') - - def test_check_swift_and_rgw_fail(self): - stack_reg = { - 'OS::TripleO::Services::SwiftProxy': 'val', - } - env_reg = { - 'OS::TripleO::Services::CephRgw': 'val', - } - mock_stack = mock.MagicMock() - mock_stack.environment = mock.MagicMock() - mock_stack.environment.return_value = { - 'resource_registry': stack_reg, - } - env = { - 'resource_registry': env_reg, - } - with self.assertRaises(exceptions.InvalidConfiguration): - utils.check_swift_and_rgw(mock_stack.environment(), - env, 'UpgradePrepare') - - @mock.patch('os.path.isfile', return_value=False) - def test_check_network_plugin_no_neutron(self, mock_file): - fake_env = { - 'parameter_defaults': { - 'NeutronMechanismDrivers': ['ovn']}, - } - utils.check_network_plugin('/tmp', - fake_env) - mock_file.assert_not_called() - - @mock.patch('os.path.isfile', return_value=False) - def test_check_network_plugin_inventory_missing(self, mock_file): - fake_env = { - 'parameter_defaults': { - 'NeutronMechanismDrivers': ['ovn']}, - 'resource_registry': { - 'OS::TripleO::Services::NeutronApi': 'foo'} - } - with self.assertRaises(exceptions.InvalidConfiguration): - utils.check_network_plugin('/tmp', - fake_env) - - @mock.patch('os.path.isfile', return_value=True) - def test_check_network_plugin_inventory_ovs_match(self, mock_file): - fake_env = { - 'parameter_defaults': { - 'NeutronMechanismDrivers': ['openvswitch']}, - 'resource_registry': { - 
'OS::TripleO::Services::NeutronApi': 'foo'} - } - mock_open_ctx = mock.mock_open(read_data='neutron_ovs_agent') - with mock.patch('builtins.open', mock_open_ctx): - utils.check_network_plugin('/tmp', - fake_env) - - @mock.patch('os.path.isfile', return_value=True) - def test_check_network_plugin_inventory_ovs_mismatch(self, mock_file): - fake_env = { - 'parameter_defaults': { - 'NeutronMechanismDrivers': ['ovn']}, - 'resource_registry': { - 'OS::TripleO::Services::NeutronApi': 'foo'} - } - with self.assertRaises(exceptions.InvalidConfiguration): - mock_open_ctx = mock.mock_open(read_data='neutron_ovs_agent') - with mock.patch('builtins.open', mock_open_ctx): - utils.check_network_plugin('/tmp', - fake_env) - - @mock.patch('os.path.isfile', return_value=True) - def test_check_network_plugin_inventory_ovn_match(self, mock_file): - fake_env = { - 'parameter_defaults': { - 'NeutronMechanismDrivers': ['ovn']}, - 'resource_registry': { - 'OS::TripleO::Services::NeutronApi': 'foo'} - } - mock_open_ctx = mock.mock_open(read_data='ovn_controller') - with mock.patch('builtins.open', mock_open_ctx): - utils.check_network_plugin('/tmp', - fake_env) - - @mock.patch('os.path.isfile', return_value=True) - def test_check_network_plugin_inventory_ovn_mismatch(self, mock_file): - fake_env = { - 'parameter_defaults': { - 'NeutronMechanismDrivers': ['openvswitch']}, - 'resource_registry': { - 'OS::TripleO::Services::NeutronApi': 'foo'} - } - with self.assertRaises(exceptions.InvalidConfiguration): - mock_open_ctx = mock.mock_open(read_data='ovn_controller') - with mock.patch('builtins.open', mock_open_ctx): - utils.check_network_plugin('/tmp', - fake_env) - - @mock.patch('subprocess.check_call') - @mock.patch('os.path.exists') - def test_remove_known_hosts(self, mock_exists, mock_check_call): - - mock_exists.return_value = True - - utils.remove_known_hosts('192.168.0.1') - known_hosts = os.path.expanduser("~/.ssh/known_hosts") - - mock_check_call.assert_called_with( - ['ssh-keygen', '-R', '192.168.0.1', '-f', known_hosts]) - - @mock.patch('subprocess.check_call') - @mock.patch('os.path.exists') - def test_remove_known_hosts_no_file(self, mock_exists, mock_check_call): - - mock_exists.return_value = False - - utils.remove_known_hosts('192.168.0.1') - - mock_check_call.assert_not_called() - - def test_empty_file_checksum(self): - # Used a NamedTemporaryFile since it's deleted when the file is closed. - with tempfile.NamedTemporaryFile() as empty_temp_file: - self.assertEqual( - utils.file_checksum(empty_temp_file.name), - ( - 'cf83e1357eefb8bdf1542850d66d8007' - 'd620e4050b5715dc83f4a921d36ce9ce47' - 'd0d13c5d85f2b0ff8318d2877eec2f63b' - '931bd47417a81a538327af927da3e')) - - def test_non_empty_file_checksum(self): - # Used a NamedTemporaryFile since it's deleted when the file is closed. - with tempfile.NamedTemporaryFile() as temp_file: - temp_file.write(b'foo') - temp_file.flush() - - self.assertEqual( - utils.file_checksum(temp_file.name), - ( - 'f7fbba6e0636f890e56fbbf3283e52' - '4c6fa3204ae298382d624741d0dc663' - '8326e282c41be5e4254d8820772c55' - '18a2c5a8c0c7f7eda19594a7eb539453e1ed7')) - - def test_non_empty_file_checksum_SHA256(self): - """Test 'file_checksum' function with an alternative algorithm. - """ - # Used a NamedTemporaryFile since it's deleted when the file is closed. 
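The digests asserted in the checksum tests above are the well-known SHA-512 hashes of the empty string and of b'foo', so file_checksum evidently defaults to sha512, rejects non-regular files with ValueError, and rejects non-FIPS algorithms such as md5 with RuntimeError. A minimal sketch under those assumptions (the approved-algorithm set and the chunk size are guesses):

import hashlib
import os

# Assumed FIPS-approved digest list; the tests only require that md5 is
# rejected and that sha256/sha512 are accepted.
FIPS_COMPLIANT_HASHES = {'sha1', 'sha224', 'sha256', 'sha384', 'sha512'}


def file_checksum(filepath, hash_algo='sha512'):
    if not os.path.isfile(filepath):
        raise ValueError('{0} is not a regular file'.format(filepath))
    if hash_algo not in FIPS_COMPLIANT_HASHES:
        raise RuntimeError(
            '{0} is not a FIPS-compliant algorithm'.format(hash_algo))
    checksum = hashlib.new(hash_algo)
    with open(filepath, 'rb') as f:
        # Read in chunks so large files do not have to fit in memory.
        for chunk in iter(lambda: f.read(4096), b''):
            checksum.update(chunk)
    return checksum.hexdigest()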
- with tempfile.NamedTemporaryFile() as temp_file: - temp_file.write(b'foo') - temp_file.flush() - - self.assertEqual( - utils.file_checksum(temp_file.name, 'sha256'), - ( - '2c26b46b68ffc68ff99b453c1d304134' - '13422d706483bfa0f98a5e886266e7ae')) - - def test_non_empty_file_checksum_non_compliant(self): - """Test 'file_checksum' function with an alternative algorithm - that isn't permitted by the FIPS. - """ - # Used a NamedTemporaryFile since it's deleted when the file is closed. - with tempfile.NamedTemporaryFile() as temp_file: - temp_file.write(b'foo') - temp_file.flush() - - self.assertRaises(RuntimeError, utils.file_checksum, - temp_file.name, 'md5') - - def test_shouldnt_checksum_open_special_files(self): - self.assertRaises(ValueError, utils.file_checksum, '/dev/random') - self.assertRaises(ValueError, utils.file_checksum, '/dev/zero') - - -class TestEnsureRunAsNormalUser(TestCase): - - @mock.patch('os.geteuid') - def test_ensure_run_as_normal_user(self, os_geteuid_mock): - os_geteuid_mock.return_value = 1000 - self.assertIsNone(utils.ensure_run_as_normal_user()) - - @mock.patch('os.geteuid') - def test_ensure_run_as_normal_user_root(self, os_geteuid_mock): - os_geteuid_mock.return_value = 0 - self.assertRaises(exceptions.RootUserExecution, - utils.ensure_run_as_normal_user) - - @mock.patch('getpass.getuser') - def test_get_deployment_user(self, mock_getpass): - mock_getpass.return_value = 'stack' - u = utils.get_deployment_user() - self.assertEqual('stack', u) - - -class TestCreateTempestDeployerInput(TestCase): - - def test_create_tempest_deployer_input(self): - with tempfile.NamedTemporaryFile() as cfgfile: - filepath = cfgfile.name - utils.create_tempest_deployer_input(filepath) - with open(filepath, 'rt') as f: - cfg = f.read() - # Just make a simple test, to make sure it created a proper file: - self.assertIn( - '[volume-feature-enabled]\nbootable = true', cfg) - - -class TestGetStackOutputItem(TestCase): - - def test_get_stack_output_item(self): - stack = mock.MagicMock() - emap = {'KeystonePublic': {'uri': 'http://foo:8000/'}} - stack.to_dict.return_value = { - 'outputs': [{'output_key': 'EndpointMap', - 'output_value': emap}] - } - - endpoint_map = utils.get_stack_output_item(stack, 'EndpointMap') - self.assertEqual(endpoint_map, - {'KeystonePublic': {'uri': 'http://foo:8000/'}}) - - def test_get_stack_output_item_not_found(self): - stack = mock.MagicMock() - stack.to_dict.return_value = { - 'outputs': [{'output_key': 'foo', - 'output_value': 'bar'}] - } - - val = utils.get_stack_output_item(stack, 'baz') - self.assertEqual(val, None) - - def test_get_stack_output_item_no_stack(self): - stack = None - val = utils.get_stack_output_item(stack, 'baz') - self.assertEqual(val, None) - - -class TestGetEndpointMap(TestCase): - - @mock.patch('tripleoclient.utils.get_stack_saved_output_item') - def test_get_endpoint_map(self, mock_saved_output_item): - working_dir = mock.Mock() - emap = {'KeystonePublic': {'uri': 'http://foo:8000/'}} - mock_saved_output_item.return_value = emap - endpoint_map = utils.get_endpoint_map(working_dir) - self.assertEqual(endpoint_map, - {'KeystonePublic': {'uri': 'http://foo:8000/'}}) - - -class TestNodeGetCapabilities(TestCase): - def test_with_capabilities(self): - node = mock.Mock(properties={'capabilities': 'x:y,foo:bar'}) - self.assertEqual({'x': 'y', 'foo': 'bar'}, - utils.node_get_capabilities(node)) - - def test_no_capabilities(self): - node = mock.Mock(properties={}) - self.assertEqual({}, utils.node_get_capabilities(node)) - - -class 
TestNodeAddCapabilities(TestCase): - def test_add(self): - bm_client = mock.Mock() - node = mock.Mock(uuid='uuid1', properties={}) - new_caps = utils.node_add_capabilities(bm_client, node, x='y') - bm_client.node.update.assert_called_once_with( - 'uuid1', [{'op': 'add', 'path': '/properties/capabilities', - 'value': 'x:y'}]) - self.assertEqual('x:y', node.properties['capabilities']) - self.assertEqual({'x': 'y'}, new_caps) - - -class TestAssignVerifyProfiles(TestCase): - def setUp(self): - - super(TestAssignVerifyProfiles, self).setUp() - self.bm_client = mock.Mock(spec=['node'], - node=mock.Mock(spec=['list', 'update'])) - self.nodes = [] - self.bm_client.node.list.return_value = self.nodes - self.flavors = {name: (fakes.FakeFlavor(name), 1) - for name in ('compute', 'control')} - - def _get_fake_node(self, profile=None, possible_profiles=[], - provision_state='available'): - caps = {'%s_profile' % p: '1' - for p in possible_profiles} - if profile is not None: - caps['profile'] = profile - caps = utils.dict_to_capabilities(caps) - return mock.Mock(uuid=str(uuid4()), - properties={'capabilities': caps}, - provision_state=provision_state, - spec=['uuid', 'properties', 'provision_state']) - - def _test(self, expected_errors, expected_warnings, - assign_profiles=True, dry_run=False): - errors, warnings = utils.assign_and_verify_profiles(self.bm_client, - self.flavors, - assign_profiles, - dry_run) - self.assertEqual(errors, expected_errors) - self.assertEqual(warnings, expected_warnings) - - def test_no_matching_without_scale(self): - self.flavors = {name: (object(), 0) - for name in self.flavors} - self.nodes[:] = [self._get_fake_node(profile='fake'), - self._get_fake_node(profile='fake')] - - self._test(0, 0) - self.assertFalse(self.bm_client.node.update.called) - - def test_exact_match(self): - self.nodes[:] = [self._get_fake_node(profile='compute'), - self._get_fake_node(profile='control')] - - self._test(0, 0) - self.assertFalse(self.bm_client.node.update.called) - - def test_nodes_with_no_profiles_present(self): - self.nodes[:] = [self._get_fake_node(profile='compute'), - self._get_fake_node(profile=None), - self._get_fake_node(profile='foobar'), - self._get_fake_node(profile='control')] - - self._test(0, 1) - self.assertFalse(self.bm_client.node.update.called) - - def test_more_nodes_with_profiles_present(self): - self.nodes[:] = [self._get_fake_node(profile='compute'), - self._get_fake_node(profile='compute'), - self._get_fake_node(profile='compute'), - self._get_fake_node(profile='control')] - - self._test(0, 1) - self.assertFalse(self.bm_client.node.update.called) - - def test_no_nodes(self): - # One error per each flavor - self._test(2, 0) - self.assertFalse(self.bm_client.node.update.called) - - def test_not_enough_nodes(self): - self.nodes[:] = [self._get_fake_node(profile='compute')] - self._test(1, 0) - self.assertFalse(self.bm_client.node.update.called) - - def test_assign_profiles(self): - self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']), - self._get_fake_node(possible_profiles=['control']), - self._get_fake_node(possible_profiles=['compute'])] - - # one warning for a redundant node - self._test(0, 1, assign_profiles=True) - self.assertEqual(2, self.bm_client.node.update.call_count) - - actual_profiles = [utils.node_get_capabilities(node).get('profile') - for node in self.nodes] - actual_profiles.sort(key=lambda x: str(x)) - self.assertEqual([None, 'compute', 'control'], actual_profiles) - - def test_assign_profiles_multiple_options(self): - self.nodes[:] = 
[self._get_fake_node(possible_profiles=['compute', - 'control']), - self._get_fake_node(possible_profiles=['compute', - 'control'])] - - self._test(0, 0, assign_profiles=True) - self.assertEqual(2, self.bm_client.node.update.call_count) - - actual_profiles = [utils.node_get_capabilities(node).get('profile') - for node in self.nodes] - actual_profiles.sort(key=lambda x: str(x)) - self.assertEqual(['compute', 'control'], actual_profiles) - - def test_assign_profiles_not_enough(self): - self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']), - self._get_fake_node(possible_profiles=['compute']), - self._get_fake_node(possible_profiles=['compute'])] - - self._test(1, 1, assign_profiles=True) - # no node update for failed flavor - self.assertEqual(1, self.bm_client.node.update.call_count) - - actual_profiles = [utils.node_get_capabilities(node).get('profile') - for node in self.nodes] - actual_profiles.sort(key=lambda x: str(x)) - self.assertEqual([None, None, 'compute'], actual_profiles) - - def test_assign_profiles_dry_run(self): - self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']), - self._get_fake_node(possible_profiles=['control']), - self._get_fake_node(possible_profiles=['compute'])] - - self._test(0, 1, dry_run=True) - self.assertFalse(self.bm_client.node.update.called) - - actual_profiles = [utils.node_get_capabilities(node).get('profile') - for node in self.nodes] - self.assertEqual([None] * 3, actual_profiles) - - def test_scale(self): - # active nodes with assigned profiles are fine - self.nodes[:] = [self._get_fake_node(profile='compute', - provision_state='active'), - self._get_fake_node(profile='control')] - - self._test(0, 0, assign_profiles=True) - self.assertFalse(self.bm_client.node.update.called) - - def test_assign_profiles_wrong_state(self): - # active nodes are not considered for assigning profiles - self.nodes[:] = [self._get_fake_node(possible_profiles=['compute'], - provision_state='active'), - self._get_fake_node(possible_profiles=['control'], - provision_state='cleaning'), - self._get_fake_node(profile='compute', - provision_state='error')] - - self._test(2, 1, assign_profiles=True) - self.assertFalse(self.bm_client.node.update.called) - - def test_no_spurious_warnings(self): - self.nodes[:] = [self._get_fake_node(profile=None)] - self.flavors = {'baremetal': (fakes.FakeFlavor('baremetal', None), 1)} - self._test(0, 0) - - -class TestPromptUser(TestCase): - def setUp(self): - super(TestPromptUser, self).setUp() - self.logger = mock.MagicMock() - self.logger.info = mock.MagicMock() - - @mock.patch('sys.stdin') - def test_user_accepts(self, stdin_mock): - stdin_mock.isatty.return_value = True - stdin_mock.readline.return_value = "yes" - result = utils.prompt_user_for_confirmation("[y/N]?", self.logger) - self.assertTrue(result) - - @mock.patch('sys.stdin') - def test_user_declines(self, stdin_mock): - stdin_mock.isatty.return_value = True - stdin_mock.readline.return_value = "no" - result = utils.prompt_user_for_confirmation("[y/N]?", self.logger) - self.assertFalse(result) - - @mock.patch('sys.stdin') - def test_user_no_tty(self, stdin_mock): - stdin_mock.isatty.return_value = False - stdin_mock.readline.return_value = "yes" - result = utils.prompt_user_for_confirmation("[y/N]?", self.logger) - self.assertFalse(result) - - @mock.patch('sys.stdin') - def test_user_aborts_control_c(self, stdin_mock): - stdin_mock.isatty.return_value = False - stdin_mock.readline.side_effect = KeyboardInterrupt() - result = 
utils.prompt_user_for_confirmation("[y/N]?", self.logger) - self.assertFalse(result) - - @mock.patch('sys.stdin') - def test_user_aborts_with_control_d(self, stdin_mock): - stdin_mock.isatty.return_value = False - stdin_mock.readline.side_effect = EOFError() - result = utils.prompt_user_for_confirmation("[y/N]?", self.logger) - self.assertFalse(result) - - -class TestReplaceLinks(TestCase): - - def setUp(self): - super(TestReplaceLinks, self).setUp() - self.link_replacement = { - 'file:///home/stack/test.sh': - 'user-files/home/stack/test.sh', - 'file:///usr/share/extra-templates/my.yml': - 'user-files/usr/share/extra-templates/my.yml', - } - - def test_replace_links(self): - source = ( - 'description: my template\n' - 'heat_template_version: "2014-10-16"\n' - 'parameters:\n' - ' foo:\n' - ' default: ["bar"]\n' - ' type: json\n' - ' bar:\n' - ' default: []\n' - 'resources:\n' - ' test_config:\n' - ' properties:\n' - ' config: {get_file: "file:///home/stack/test.sh"}\n' - ' type: OS::Heat::SoftwareConfig\n' - ) - expected = ( - 'description: my template\n' - 'heat_template_version: "2014-10-16"\n' - 'parameters:\n' - ' foo:\n' - ' default: ["bar"]\n' - ' type: json\n' - ' bar:\n' - ' default: []\n' - 'resources:\n' - ' test_config:\n' - ' properties:\n' - ' config: {get_file: user-files/home/stack/test.sh}\n' - ' type: OS::Heat::SoftwareConfig\n' - ) - - # the yaml->string dumps aren't always character-precise, so - # we need to parse them into dicts for comparison - expected_dict = yaml.safe_load(expected) - result_dict = yaml.safe_load(utils.replace_links_in_template_contents( - source, self.link_replacement)) - self.assertEqual(expected_dict, result_dict) - - def test_replace_links_not_template(self): - # valid JSON/YAML, but doesn't have heat_template_version - source = '{"get_file": "file:///home/stack/test.sh"}' - self.assertEqual( - source, - utils.replace_links_in_template_contents( - source, self.link_replacement)) - - def test_replace_links_not_yaml(self): - # invalid JSON/YAML -- curly brace left open - source = '{"invalid JSON"' - self.assertEqual( - source, - utils.replace_links_in_template_contents( - source, self.link_replacement)) - - def test_relative_link_replacement(self): - current_dir = 'user-files/home/stack' - expected = { - 'file:///home/stack/test.sh': - 'test.sh', - 'file:///usr/share/extra-templates/my.yml': - '../../usr/share/extra-templates/my.yml', - } - self.assertEqual(expected, utils.relative_link_replacement( - self.link_replacement, current_dir)) - - -class TestBracketIPV6(TestCase): - def test_basic(self): - result = utils.bracket_ipv6('::1') - self.assertEqual('[::1]', result) - - def test_hostname(self): - result = utils.bracket_ipv6('hostname') - self.assertEqual('hostname', result) - - def test_already_bracketed(self): - result = utils.bracket_ipv6('[::1]') - self.assertEqual('[::1]', result) - - -class TestIsValidIP(TestCase): - def test_with_valid_ipv4(self): - result = utils.is_valid_ip('192.168.0.1') - self.assertEqual(True, result) - - def test_with_valid_ipv6(self): - result = utils.is_valid_ip('::1') - self.assertEqual(True, result) - - def test_with_invalid_ip(self): - result = utils.is_valid_ip('192.168.1%bad') - self.assertEqual(False, result) - - -class TestIsLoopback(TestCase): - def test_with_loopback(self): - result = utils.is_loopback('127.0.0.1') - self.assertEqual(True, result) - - def test_with_no_loopback(self): - result = utils.is_loopback('10.0.0.1') - self.assertEqual(False, result) - - -class TestGetHostIps(TestCase): - def 
test_get_host_ips(self): - with mock.patch.object(socket, 'getaddrinfo') as mock_addrinfo: - mock_addrinfo.return_value = [('', '', 6, '', ('127.0.0.1', 0))] - result = utils.get_host_ips('myhost.domain') - self.assertEqual(['127.0.0.1'], result) - - -class TestGetSingleIp(TestCase): - def test_with_fqdn_and_valid_ip(self): - with mock.patch.object(utils, 'get_host_ips') as mock_gethostips: - mock_gethostips.return_value = ['192.168.0.1'] - result = utils.get_single_ip('myhost.domain') - self.assertEqual('192.168.0.1', result) - - def test_with_fqdn_and_loopback(self): - with mock.patch.object(utils, 'get_host_ips') as mock_gethostips: - mock_gethostips.return_value = ['127.0.0.1'] - self.assertRaises(exceptions.LookupError, - utils.get_single_ip, 'myhost.domain') - - def test_with_too_much_ips(self): - with mock.patch.object(utils, 'get_host_ips') as mock_gethostips: - mock_gethostips.return_value = ['192.168.0.1', '192.168.0.2'] - self.assertRaises(exceptions.LookupError, - utils.get_single_ip, 'myhost.domain') - - def test_without_ip(self): - with mock.patch.object(utils, 'get_host_ips') as mock_gethostips: - mock_gethostips.return_value = [] - self.assertRaises(exceptions.LookupError, - utils.get_single_ip, 'myhost.domain') - - def test_with_invalid_ip(self): - with mock.patch.object(utils, 'get_host_ips') as mock_gethostips: - mock_gethostips.return_value = ['192.168.23.x'] - self.assertRaises(exceptions.LookupError, - utils.get_single_ip, 'myhost.domain') - - -class TestStoreCliParam(TestCase): - - def setUp(self): - class ArgsFake(object): - def __init__(self): - self.a = 1 - - self.args = ArgsFake() - - @mock.patch('os.path.isdir') - @mock.patch('os.chown') - @mock.patch('os.mkdir') - def test_non_directory_exists(self, mock_mkdir, mock_chown, mock_isdir): - mock_isdir.return_value = False - self.assertRaises(exceptions.InvalidConfiguration, - utils.store_cli_param, - "overcloud deploy", self.args) - - @mock.patch('tripleoclient.utils.datetime') - @mock.patch('os.path.isdir') - @mock.patch('os.chown') - @mock.patch('os.mkdir') - def test_directory_exists(self, mock_mkdir, mock_chown, mock_isdir, - mock_date): - history_path = os.path.join(os.path.expanduser("~"), '.tripleo') - mock_mkdir.side_effect = OSError(errno.EEXIST, 'error') - mock_isdir.return_value = True - mock_file = mock.mock_open() - mock_date.datetime.now.return_value = datetime.datetime(2017, 11, 22) - - with mock.patch("builtins.open", mock_file): - utils.store_cli_param("overcloud plan list", self.args) - - expected_call = [ - mock.call("%s/history" % history_path, 'a'), - mock.call().write('2017-11-22 00:00:00 overcloud-plan-list a=1 \n') - ] - mock_file.assert_has_calls(expected_call, any_order=True) - - @mock.patch('os.path.isdir') - @mock.patch('os.chown') - @mock.patch('os.mkdir') - def test_directory_fail(self, mock_mkdir, mock_chown, mock_isdir): - mock_mkdir.side_effect = OSError() - with self.assertRaises(IOError): - utils.store_cli_param("overcloud plan list", self.args) - mock_chown.assert_not_called() - mock_isdir.assert_not_called() - - @mock.patch('tripleoclient.utils.datetime') - @mock.patch('os.path.isdir') - @mock.patch('os.chown') - @mock.patch('os.mkdir') - def test_write_cli_param(self, mock_mkdir, mock_chown, mock_isdir, - mock_date): - history_path = os.path.join(os.path.expanduser("~"), '.tripleo') - mock_isdir.return_value = True - mock_file = mock.mock_open() - mock_date.datetime.now.return_value = datetime.datetime(2017, 11, 22) - - with mock.patch("builtins.open", mock_file): - 
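The TestStoreCliParam cases around here assert the exact history line written to ~/.tripleo/history: a timestamp, the command with spaces turned into dashes, then each parsed argument as key=value. A minimal sketch of a writer that would produce that line; the helper name is hypothetical and directory creation, ownership, and error handling are simplified away:

import datetime
import os


def _write_history_entry(command, parsed_args):
    # Sketch: '<YYYY-MM-DD HH:MM:SS> <command-with-dashes> <attr>=<value> '
    # appended to ~/.tripleo/history, matching the expected mock call.
    used_command = command.replace(' ', '-')
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    args = ' '.join('%s=%s' % (k, v)
                    for k, v in sorted(vars(parsed_args).items()))
    history_path = os.path.join(os.path.expanduser('~'), '.tripleo')
    with open(os.path.join(history_path, 'history'), 'a') as f:
        f.write('%s %s %s \n' % (timestamp, used_command, args))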
utils.store_cli_param("overcloud plan list", self.args) - - expected_call = [ - mock.call("%s/history" % history_path, 'a'), - mock.call().write('2017-11-22 00:00:00 overcloud-plan-list a=1 \n') - ] - mock_file.assert_has_calls(expected_call, any_order=True) - - @mock.patch('builtins.open') - @mock.patch('os.path.isdir') - @mock.patch('os.chown') - @mock.patch('os.mkdir') - def test_fail_to_write_data(self, mock_mkdir, mock_chown, mock_isdir, - mock_open): - mock_isdir.return_value = True - mock_open.side_effect = IOError() - with self.assertRaises(IOError): - utils.store_cli_param("command", self.args) - - -class ProcessMultipleEnvironments(TestCase): - - def setUp(self): - self.tht_root = '/twd/templates' - self.user_tht_root = '/tmp/thtroot/' - self.created_env_files = [ - './inside.yaml', '/tmp/thtroot/abs.yaml', - '/tmp/thtroot/puppet/foo.yaml', - '/tmp/thtroot/environments/myenv.yaml', - '/tmp/thtroot42/notouch.yaml', - './tmp/thtroot/notouch2.yaml', - '../outside.yaml'] - - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'get_template_contents', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.environment_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('heatclient.common.template_format.' - 'parse', autospec=True, return_value=dict()) - def test_redirect_templates_paths(self, - mock_hc_templ_parse, - mock_hc_env_parse, - mock_hc_get_templ_cont, - mock_hc_process): - utils.process_multiple_environments(self.created_env_files, - self.tht_root, - self.user_tht_root) - - mock_hc_process.assert_has_calls([ - mock.call(env_path='./inside.yaml', - include_env_in_files=False), - mock.call(env_path='/twd/templates/abs.yaml', - include_env_in_files=False), - mock.call(env_path='/twd/templates/puppet/foo.yaml', - include_env_in_files=False), - mock.call(env_path='/twd/templates/environments/myenv.yaml', - include_env_in_files=False), - mock.call(env_path='/tmp/thtroot42/notouch.yaml', - include_env_in_files=False), - mock.call(env_path='./tmp/thtroot/notouch2.yaml', - include_env_in_files=False), - mock.call(env_path='../outside.yaml', - include_env_in_files=False)]) - - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', - autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'get_template_contents', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.environment_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('heatclient.common.template_format.' 
- 'parse', autospec=True, return_value=dict()) - @mock.patch('yaml.safe_dump', autospec=True) - @mock.patch('yaml.safe_load', autospec=True) - @mock.patch('builtins.open') - @mock.patch('tempfile.NamedTemporaryFile', autospec=True) - def test_rewrite_env_files(self, - mock_temp, mock_open, - mock_yaml_load, - mock_yaml_dump, - mock_hc_templ_parse, - mock_hc_env_parse, - mock_hc_get_templ_cont, - mock_hc_process): - - def hc_process(*args, **kwargs): - if 'abs.yaml' in kwargs['env_path']: - raise hc_exc.CommandError - else: - return ({}, {}) - - mock_hc_process.side_effect = hc_process - rewritten_env = {'resource_registry': { - 'OS::Foo::Bar': '/twd/outside.yaml', - 'OS::Foo::Baz': '/twd/templates/inside.yaml', - 'OS::Foo::Qux': '/twd/templates/abs.yaml', - 'OS::Foo::Quux': '/tmp/thtroot42/notouch.yaml', - 'OS::Foo::Corge': '/twd/templates/puppet/foo.yaml' - } - } - myenv = {'resource_registry': { - 'OS::Foo::Bar': '../outside.yaml', - 'OS::Foo::Baz': './inside.yaml', - 'OS::Foo::Qux': '/tmp/thtroot/abs.yaml', - 'OS::Foo::Quux': '/tmp/thtroot42/notouch.yaml', - 'OS::Foo::Corge': '/tmp/thtroot/puppet/foo.yaml' - } - } - mock_yaml_load.return_value = myenv - - utils.process_multiple_environments(self.created_env_files, - self.tht_root, - self.user_tht_root, None, False) - - mock_yaml_dump.assert_has_calls([mock.call(rewritten_env, - default_flow_style=False)]) - - -class GetTripleoAnsibleInventory(TestCase): - - def setUp(self): - super(GetTripleoAnsibleInventory, self).setUp() - self.inventory_file = '' - self.ssh_user = 'heat_admin' - self.stack = 'foo-overcloud' - - @mock.patch('tripleoclient.utils.get_tripleo_ansible_inventory', - autospec=True) - def test_get_tripleo_ansible_inventory(self, mock_inventory): - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = True - - self.cmd = utils.get_tripleo_ansible_inventory( - inventory_file=self.inventory_file, - ssh_user=self.ssh_user, - stack=self.stack) - - self.cmd.take_action() - - mock_inventory.assert_called_once_with( - inventory_file='', - ssh_user='heat_admin', - stack='foo-overcloud' - ) - - -class TestNormalizeFilePath(TestCase): - - @mock.patch('os.path.isfile', return_value=True) - def test_norm_path_abs(self, mock_exists): - self.assertEqual( - utils.rel_or_abs_path('/foobar.yaml', '/tmp'), - '/foobar.yaml') - - @mock.patch('os.path.isfile', side_effect=[False, True]) - def test_norm_path_rel(self, mock_exists): - self.assertEqual( - utils.rel_or_abs_path('baz/foobar.yaml', '/bar'), - '/bar/baz/foobar.yaml') - - -class TestFetchRolesFile(TestCase): - - @mock.patch('os.path.exists', return_value=True) - def test_fetch_roles_file(self, mock_exists): - with tempfile.NamedTemporaryFile(mode='w') as roles_file: - yaml.dump([{'name': 'Foobar'}], roles_file) - with mock.patch('tripleoclient.utils.rel_or_abs_path') as mock_rf: - mock_rf.return_value = roles_file.name - self.assertEqual(utils.fetch_roles_file(roles_file.name), - [{'name': 'Foobar'}]) - - -class TestOvercloudNameScenarios(TestWithScenarios): - scenarios = [ - ('kernel_default', - dict(func=utils.overcloud_kernel, - basename='overcloud-full', - expected=('overcloud-full-vmlinuz', '.vmlinuz'))), - ('kernel_arch', - dict(func=utils.overcloud_kernel, - basename='overcloud-full', - arch='x86_64', - expected=('x86_64-overcloud-full-vmlinuz', '.vmlinuz'))), - ('kernel_arch_platform', - dict(func=utils.overcloud_kernel, - basename='overcloud-full', - arch='x86_64', - platform='SNB', - expected=('SNB-x86_64-overcloud-full-vmlinuz', '.vmlinuz'))), - 
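# Note on the scenarios below: a platform prefix is only applied when an
# architecture is also given. The 'kernel_platform', 'ramdisk_platform'
# and 'image_platform' cases all expect the plain basename even though
# platform='SNB' is passed.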
('kernel_platform', - dict(func=utils.overcloud_kernel, - basename='overcloud-full', - platform='SNB', - expected=('overcloud-full-vmlinuz', '.vmlinuz'))), - ('ramdisk_default', - dict(func=utils.overcloud_ramdisk, - basename='overcloud-full', - expected=('overcloud-full-initrd', '.initrd'))), - ('ramdisk_arch', - dict(func=utils.overcloud_ramdisk, - basename='overcloud-full', - arch='x86_64', - expected=('x86_64-overcloud-full-initrd', '.initrd'))), - ('ramdisk_arch_platform', - dict(func=utils.overcloud_ramdisk, - basename='overcloud-full', - arch='x86_64', - platform='SNB', - expected=('SNB-x86_64-overcloud-full-initrd', '.initrd'))), - ('ramdisk_platform', - dict(func=utils.overcloud_ramdisk, - basename='overcloud-full', - platform='SNB', - expected=('overcloud-full-initrd', '.initrd'))), - ('image_default', - dict(func=utils.overcloud_image, - basename='overcloud-full', - expected=('overcloud-full', '.raw'))), - ('image_arch', - dict(func=utils.overcloud_image, - basename='overcloud-full', - arch='x86_64', - expected=('x86_64-overcloud-full', '.raw'))), - ('image_arch_platform', - dict(func=utils.overcloud_image, - basename='overcloud-full', - arch='x86_64', - platform='SNB', - expected=('SNB-x86_64-overcloud-full', '.raw'))), - ('image_platform', - dict(func=utils.overcloud_image, - basename='overcloud-full', - platform='SNB', - expected=('overcloud-full', '.raw'))), - ] - - def test_overcloud_params(self): - kwargs = dict() - for attr in ['arch', 'platform']: - if hasattr(self, attr): - kwargs[attr] = getattr(self, attr) - - if kwargs: - observed = self.func(self.basename, **kwargs) - else: - observed = self.func(self.basename) - - self.assertEqual(self.expected, observed) - - -class TestDeployNameScenarios(TestWithScenarios): - scenarios = [ - ('kernel_default', - dict(func=utils.deploy_kernel, - expected='agent.kernel')), - ('kernel_arch', - dict(func=utils.deploy_kernel, - arch='x86_64', - expected='x86_64/agent.kernel')), - ('kernel_arch_platform', - dict(func=utils.deploy_kernel, - arch='x86_64', - platform='SNB', - expected='SNB-x86_64/agent.kernel')), - ('kernel_platform', - dict(func=utils.deploy_kernel, - platform='SNB', - expected='agent.kernel')), - ('ramdisk_default', - dict(func=utils.deploy_ramdisk, - expected='agent.ramdisk')), - ('ramdisk_arch', - dict(func=utils.deploy_ramdisk, - arch='x86_64', - expected='x86_64/agent.ramdisk')), - ('ramdisk_arch_platform', - dict(func=utils.deploy_ramdisk, - arch='x86_64', - platform='SNB', - expected='SNB-x86_64/agent.ramdisk')), - ('ramdisk_platform', - dict(func=utils.deploy_ramdisk, - platform='SNB', - expected='agent.ramdisk')), - ] - - def test_deploy_params(self): - kwargs = {} - for attr in ['arch', 'platform']: - if hasattr(self, attr): - kwargs[attr] = getattr(self, attr) - - if kwargs: - observed = self.func(**kwargs) - else: - observed = self.func() - - self.assertEqual(self.expected, observed) - - -class TestDeploymentPythonInterpreter(TestCase): - def test_system_default(self): - args = mock.MagicMock() - args.deployment_python_interpreter = None - py = utils.get_deployment_python_interpreter(args) - self.assertEqual(py, sys.executable) - - def test_provided_interpreter(self): - args = mock.MagicMock() - args.deployment_python_interpreter = 'foo' - py = utils.get_deployment_python_interpreter(args) - self.assertEqual(py, 'foo') - - -class TestWaitApiPortReady(TestCase): - @mock.patch('urllib.request.urlopen') - def test_success(self, urlopen_mock): - has_errors = utils.wait_api_port_ready(8080) - 
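The TestWaitApiPortReady cases that follow pin down the probe loop behind utils.wait_api_port_ready: up to 30 attempts with a sleep before each, False once the port answers, True on an HTTP 300 "multiple choices" response, RuntimeError once the retry budget is exhausted, and unknown exceptions propagate untouched. A minimal sketch matching those asserted call counts; the URL path and sleep interval are assumptions:

import socket
import time
import urllib.error as url_error
import urllib.request


def wait_api_port_ready(api_port, host='127.0.0.1'):
    count = 0
    while count < 30:
        time.sleep(10)
        count += 1
        try:
            urllib.request.urlopen(
                'http://%s:%s/' % (host, api_port), timeout=1)
            return False  # port answered: no errors
        except url_error.HTTPError as he:
            if he.code == 300:
                return True  # service is up but reports errors
        except (socket.timeout, url_error.URLError):
            pass  # transient failure: keep retrying
    raise RuntimeError('Timeout waiting for port %s to be ready' % api_port)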
self.assertFalse(has_errors) - - @mock.patch( - 'urllib.request.urlopen', - side_effect=[ - url_error.HTTPError("", 201, None, None, None), socket.timeout, - url_error.URLError("") - ] * 10) - @mock.patch('time.sleep') - def test_throw_exception_at_max_retries(self, sleep_mock, urlopen_mock): - with self.assertRaises(RuntimeError): - utils.wait_api_port_ready(8080) - self.assertEqual(urlopen_mock.call_count, 30) - self.assertEqual(sleep_mock.call_count, 30) - - @mock.patch( - 'urllib.request.urlopen', - side_effect=[ - socket.timeout, - url_error.URLError(""), - url_error.HTTPError("", 201, None, None, None), None - ]) - @mock.patch('time.sleep') - def test_recovers_from_exception(self, sleep_mock, urlopen_mock): - self.assertFalse(utils.wait_api_port_ready(8080)) - self.assertEqual(urlopen_mock.call_count, 4) - self.assertEqual(sleep_mock.call_count, 4) - - @mock.patch( - 'urllib.request.urlopen', - side_effect=[ - socket.timeout, - url_error.URLError(""), - url_error.HTTPError("", 300, None, None, None) - ] * 10) - @mock.patch('time.sleep') - def test_recovers_from_multiple_choices_error_code(self, sleep_mock, - urlopen_mock): - self.assertTrue(utils.wait_api_port_ready(8080)) - self.assertEqual(urlopen_mock.call_count, 3) - self.assertEqual(sleep_mock.call_count, 3) - - @mock.patch('urllib.request.urlopen', side_effect=NameError) - @mock.patch('time.sleep') - def test_dont_retry_at_unknown_exception(self, sleep_mock, urlopen_mock): - with self.assertRaises(NameError): - utils.wait_api_port_ready(8080) - self.assertEqual(urlopen_mock.call_count, 1) - self.assertEqual(sleep_mock.call_count, 1) - - - class TestCheckHostname(TestCase): - @mock.patch('tripleoclient.utils.run_command') - def test_hostname_ok(self, mock_run): - mock_run.side_effect = ['host.domain', 'host.domain'] - mock_open_ctx = mock.mock_open(read_data='127.0.0.1 host.domain') - with mock.patch('tripleoclient.utils.open', mock_open_ctx): - utils.check_hostname(False) - run_calls = [ - mock.call(['hostnamectl', '--static'], name='hostnamectl'), - mock.call(['hostnamectl', '--transient'], name='hostnamectl')] - self.assertEqual(mock_run.mock_calls, run_calls) - - @mock.patch('tripleoclient.utils.run_command') - def test_hostname_fix_hosts_ok(self, mock_run): - mock_run.side_effect = ['host.domain', 'host.domain', ''] - mock_open_ctx = mock.mock_open(read_data='') - with mock.patch('tripleoclient.utils.open', mock_open_ctx): - utils.check_hostname(True) - sed_cmd = 'sed -i "s/127.0.0.1\\(\\s*\\)/127.0.0.1\\\\1host.domain ' \ - 'host /" /etc/hosts' - run_calls = [ - mock.call(['hostnamectl', '--static'], name='hostnamectl'), - mock.call(['hostnamectl', '--transient'], name='hostnamectl'), - mock.call(['sudo', '/bin/bash', '-c', sed_cmd], - name='hostname-to-etc-hosts')] - self.assertEqual(mock_run.mock_calls, run_calls) - - @mock.patch('tripleoclient.utils.run_command') - def test_hostname_mismatch_fail(self, mock_run): - mock_run.side_effect = ['host.domain', ''] - self.assertRaises(RuntimeError, utils.check_hostname) - - @mock.patch('tripleoclient.utils.run_command') - def test_hostname_short_fail(self, mock_run): - mock_run.side_effect = ['host', 'host'] - self.assertRaises(RuntimeError, utils.check_hostname) - - - class TestCheckEnvForProxy(TestCase): - def test_no_proxy(self): - utils.check_env_for_proxy() - - @mock.patch.dict(os.environ, - {'http_proxy': 'foo:1111', - 'no_proxy': 'foo'}) - def test_http_proxy_ok(self): - utils.check_env_for_proxy(['foo']) - - 
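The TestCheckEnvForProxy cases are worth a closer look: no_proxy='foobar' fails for hosts 'foo' and 'bar' even though both appear as substrings, so the check must match whole comma-separated no_proxy entries, not substrings. A minimal sketch of that logic; the exact error message is an assumption:

import os


def check_env_for_proxy(no_proxy_hosts=None):
    # Sketch: only enforced when a proxy is actually configured.
    no_proxy_hosts = no_proxy_hosts or []
    http_proxy = os.environ.get('http_proxy')
    https_proxy = os.environ.get('https_proxy')
    if not (http_proxy or https_proxy):
        return
    # Split on commas so 'foobar' does not satisfy 'foo' or 'bar'.
    no_proxy = os.environ.get('no_proxy', '').split(',')
    missing = [host for host in no_proxy_hosts if host not in no_proxy]
    if missing:
        raise RuntimeError('no_proxy is missing entries for: %s'
                           % ', '.join(missing))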
@mock.patch.dict(os.environ, - {'https_proxy': 'bar:1111', - 'no_proxy': 'foo,bar'}) - def test_https_proxy_ok(self): - utils.check_env_for_proxy(['foo', 'bar']) - - @mock.patch.dict(os.environ, - {'http_proxy': 'foo:1111', - 'https_proxy': 'bar:1111', - 'no_proxy': 'foobar'}) - def test_proxy_fail(self): - self.assertRaises(RuntimeError, - utils.check_env_for_proxy, - ['foo', 'bar']) - - @mock.patch.dict(os.environ, - {'http_proxy': 'foo:1111', - 'https_proxy': 'bar:1111', - 'no_proxy': 'foobar'}) - def test_proxy_fail_partial_match(self): - self.assertRaises(RuntimeError, - utils.check_env_for_proxy, - ['foo', 'bar']) - - @mock.patch.dict(os.environ, - {'http_proxy': 'foo:1111', - 'https_proxy': 'bar:1111'}) - def test_proxy_fail_no_proxy_unset(self): - self.assertRaises(RuntimeError, - utils.check_env_for_proxy, - ['foo', 'bar']) - - -class TestConfigParser(TestCase): - - def setUp(self): - self.tmp_dir = tempfile.mkdtemp() - - def tearDown(self): - if self.tmp_dir: - shutil.rmtree(self.tmp_dir) - self.tmp_dir = None - - def test_get_config_value(self): - cfg = ConfigParser() - cfg.add_section('foo') - cfg.set('foo', 'bar', 'baz') - config = utils.get_from_cfg(cfg, 'bar', 'foo') - self.assertEqual(config, 'baz') - - def test_getboolean_config_value(self): - cfg = ConfigParser() - cfg.add_section('foo') - test_data_set = [ - (True, 'True'), - (True, 'true'), - (False, 'False'), - (False, 'false') - ] - for test_data in test_data_set: - expected_value, config_value = test_data - cfg.set('foo', 'bar', config_value) - obtained_value = utils.getboolean_from_cfg(cfg, 'bar', 'foo') - self.assertEqual(obtained_value, expected_value) - - def test_getboolean_bad_config_value(self): - cfg = ConfigParser() - cfg.add_section('foo') - cfg.set('foo', 'bar', 'I am not a boolean') - self.assertRaises(exceptions.NotFound, - utils.getboolean_from_cfg, - cfg, 'bar', 'foo') - - def test_get_config_value_multiple_files(self): - _, cfile1_name = tempfile.mkstemp(dir=self.tmp_dir, text=True) - _, cfile2_name = tempfile.mkstemp(dir=self.tmp_dir, text=True) - cfiles = [cfile1_name, cfile2_name] - cfg = ConfigParser() - cfg.add_section('foo') - cfg.set('foo', 'bar', 'baz') - with open(cfile1_name, 'w') as fp: - cfg.write(fp) - cfg.set('foo', 'bar', 'boop') - with open(cfile2_name, 'w') as fp: - cfg.write(fp) - cfgs = utils.get_read_config(cfiles) - config = utils.get_from_cfg(cfgs, 'bar', 'foo') - self.assertEqual(config, 'boop') - - def test_get_config_value_bad_file(self): - self.assertRaises(AttributeError, - utils.get_from_cfg, - 'does-not-exist', 'bar', 'foo') - - -class TestGetLocalTimezone(TestCase): - @mock.patch('tripleoclient.utils.run_command') - def test_get_local_timezone(self, run_mock): - run_mock.return_value = "" \ - " Local time: Thu 2019-03-14 12:05:49 EDT\n" \ - " Universal time: Thu 2019-03-14 16:05:49 UTC\n" \ - " RTC time: Thu 2019-03-14 16:15:50\n" \ - " Time zone: America/New_York (EDT, -0400)\n" \ - "System clock synchronized: yes\n" \ - " NTP service: active\n"\ - " RTC in local TZ: no\n" - self.assertEqual('America/New_York', utils.get_local_timezone()) - - @mock.patch('tripleoclient.utils.run_command') - def test_get_local_timezone_bad_timedatectl(self, run_mock): - run_mock.return_value = "meh" - self.assertEqual('UTC', utils.get_local_timezone()) - - @mock.patch('tripleoclient.utils.run_command') - def test_get_local_timezone_bad_timezone_line(self, run_mock): - run_mock.return_value = "" \ - " Time zone: " - self.assertEqual('UTC', utils.get_local_timezone()) - - -class 
TestParseExtraVars(TestCase): - def test_simple_case_text_format(self): - input_parameter = ['key1=val1', 'key2=val2 key3=val3'] - expected = { - 'key1': 'val1', - 'key2': 'val2', - 'key3': 'val3' - } - result = utils.parse_extra_vars(input_parameter) - self.assertEqual(result, expected) - - def test_simple_case_json_format(self): - input_parameter = ['{"key1": "val1", "key2": "val2"}'] - expected = { - 'key1': 'val1', - 'key2': 'val2' - } - result = utils.parse_extra_vars(input_parameter) - self.assertEqual(result, expected) - - def test_multiple_format(self): - input_parameter = [ - 'key1=val1', 'key2=val2 key3=val3', - '{"key4": "val4", "key5": "val5"}'] - expected = { - 'key1': 'val1', - 'key2': 'val2', - 'key3': 'val3', - 'key4': 'val4', - 'key5': 'val5' - } - result = utils.parse_extra_vars(input_parameter) - self.assertEqual(result, expected) - - def test_same_key(self): - input_parameter = [ - 'key1=val1', 'key2=val2 key3=val3', - '{"key1": "other_value", "key5": "val5"}'] - expected = { - 'key1': 'other_value', - 'key2': 'val2', - 'key3': 'val3', - 'key5': 'val5' - } - result = utils.parse_extra_vars(input_parameter) - self.assertEqual(result, expected) - - def test_with_multiple_space(self): - input_parameter = ['key1=val1', ' key2=val2 key3=val3 '] - expected = { - 'key1': 'val1', - 'key2': 'val2', - 'key3': 'val3' - } - result = utils.parse_extra_vars(input_parameter) - self.assertEqual(result, expected) - - def test_invalid_string(self): - input_parameter = [ - 'key1=val1', 'key2=val2 key3=val3', - '{"key1": "other_value", "key5": "val5'] - self.assertRaises( - ValueError, utils.parse_extra_vars, input_parameter) - - def test_invalid_format(self): - input_parameter = ['key1 val1'] - self.assertRaises( - ValueError, utils.parse_extra_vars, input_parameter) - - -class TestGeneralUtils(base.TestCommand): - - def setUp(self): - super(TestGeneralUtils, self).setUp() - - @mock.patch('tripleoclient.utils.safe_write') - def test_update_deployment_status(self, mock_write): - mock_status = { - 'deployment_status': 'TESTING' - } - utils.update_deployment_status( - 'overcloud', - mock_status, - '' - ) - mock_write.assert_called() - - def test_playbook_limit_parse(self): - limit_nodes = 'controller0, compute0:compute1,!compute2' - limit_hosts_expected = 'controller0:compute0:compute1:!compute2' - limit_hosts_actual = utils.playbook_limit_parse(limit_nodes) - self.assertEqual(limit_hosts_actual, limit_hosts_expected) - - -class TestTempDirs(base.TestCase): - - @mock.patch('tripleoclient.utils.tempfile.mkdtemp', - autospec=True, - return_value='foo') - @mock.patch('tripleoclient.utils.Pushd', autospec=True) - def test_init_dirpath(self, mock_pushd, mock_mkdtemp): - - utils.TempDirs(dir_path='bar') - - mock_pushd.assert_called_once_with(directory='foo') - mock_mkdtemp.assert_called_once_with( - dir='bar', - prefix='tripleo') - - @mock.patch('tripleoclient.utils.tempfile.mkdtemp', - autospec=True, - return_value='foo') - @mock.patch('tripleoclient.utils.Pushd', autospec=True,) - def test_init_no_prefix(self, mock_pushd, mock_mkdtemp): - - utils.TempDirs(dir_prefix=None) - - mock_pushd.assert_called_once_with(directory='foo') - mock_mkdtemp.assert_called_once_with() - - @mock.patch('tripleoclient.utils.LOG.warning', autospec=True,) - @mock.patch('tripleoclient.utils.tempfile.mkdtemp', - autospec=True, - return_value='foo') - @mock.patch('tripleoclient.utils.Pushd', autospec=True) - def test_exit_warning(self, mock_pushd, mock_mkdtemp, mock_log): - - temp_dirs = utils.TempDirs(cleanup=False, 
chdir=False) - - temp_dirs.__exit__() - - mock_log.assert_called_once_with( - "Not cleaning temporary directory [ foo ]") - - -class TestGetCtlplaneAttrs(base.TestCase): - - @mock.patch('openstack.connect', autospec=True) - @mock.patch.object(openstack.connection, 'Connection', autospec=True) - def test_get_ctlplane_attrs_no_network(self, mock_conn, mock_connect): - mock_connect.return_value = mock_conn - mock_conn.network.find_network.return_value = None - expected = dict() - self.assertEqual(expected, utils.get_ctlplane_attrs()) - - @mock.patch('openstack.connect', autospec=True) - def test_get_ctlplane_attrs_no_config(self, mock_connect): - mock_connect.side_effect = openstack.exceptions.ConfigException - - expected = dict() - self.assertEqual(expected, utils.get_ctlplane_attrs()) - - @mock.patch('openstack.connect', autospec=True) - @mock.patch.object(openstack.connection, 'Connection', autospec=True) - def test_get_ctlplane_attrs(self, mock_conn, mock_connect): - mock_connect.return_value = mock_conn - fake_network = fakes.FakeNeutronNetwork( - name='net_name', - mtu=1440, - dns_domain='ctlplane.localdomain.', - tags=[], - subnet_ids=['subnet_id']) - fake_subnet = fakes.FakeNeutronSubnet( - id='subnet_id', - name='subnet_name', - cidr='192.168.24.0/24', - gateway_ip='192.168.24.1', - host_routes=[ - {'destination': '192.168.25.0/24', 'nexthop': '192.168.24.1'}], - dns_nameservers=['192.168.24.254'], - ip_version=4 - ) - mock_conn.network.find_network.return_value = fake_network - mock_conn.network.get_subnet.return_value = fake_subnet - expected = { - 'network': { - 'dns_domain': 'ctlplane.localdomain.', - 'mtu': 1440, - 'name': 'net_name', - 'tags': []}, - 'subnets': { - 'subnet_name': { - 'cidr': '192.168.24.0/24', - 'dns_nameservers': ['192.168.24.254'], - 'gateway_ip': '192.168.24.1', - 'host_routes': [{'destination': '192.168.25.0/24', - 'nexthop': '192.168.24.1'}], - 'ip_version': 4, - 'name': 'subnet_name'} - } - } - self.assertEqual(expected, utils.get_ctlplane_attrs()) - - -class TestGetHostEntry(base.TestCase): - - @mock.patch('subprocess.Popen', autospec=True) - def test_get_undercloud_host_entry(self, mock_popen): - mock_process = mock.Mock() - mock_hosts = { - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane': - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane', - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane\n' - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane': - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane', - '1.2.3.4 uc.ctlplane foo uc.ctlplane bar uc.ctlplane': - '1.2.3.4 uc.ctlplane foo bar' - } - for value, expected in mock_hosts.items(): - mock_process.communicate.return_value = (value, '') - mock_process.returncode = 0 - mock_popen.return_value = mock_process - self.assertEqual(expected, utils.get_undercloud_host_entry()) - - -class TestProhibitedOverrides(base.TestCommand): - - def setUp(self): - super(TestProhibitedOverrides, self).setUp() - self.tmp_dir = self.useFixture(fixtures.TempDir()) - - def test_extend_protected_overrides(self): - protected_overrides = { - 'registry_entries': {'OS::Foo::Bar': ['foo_bar_file']}} - output_path = self.tmp_dir.join('env-file.yaml') - fake_env = { - 'parameter_defaults': { - 'DeployedNetworkEnvironment': {'foo': 'bar'}}, - 'resource_registry': { - 'OS::TripleO::Network': 'foo'} - } - with open(output_path, 'w') as temp_file: - yaml.safe_dump(fake_env, temp_file) - - utils.extend_protected_overrides(protected_overrides, output_path) - self.assertEqual({ - 'registry_entries': { - 'OS::Foo::Bar': ['foo_bar_file'], - 
'OS::TripleO::Network': [output_path]}}, - protected_overrides) - - def test_check_prohibited_overrides_with_conflict(self): - protected_overrides = { - 'registry_entries': {'OS::Foo::Bar': ['foo_bar_file']}} - user_env = self.tmp_dir.join('env-file01.yaml') - fake_env = {'parameter_defaults': {'foo_param': {'foo': 'bar'}}, - 'resource_registry': {'OS::Foo::Bar': 'foo'}} - with open(user_env, 'w') as temp_file: - yaml.safe_dump(fake_env, temp_file) - - self.assertRaises(exceptions.DeploymentError, - utils.check_prohibited_overrides, - protected_overrides, [(user_env, user_env)]) - self.assertRaisesRegex( - exceptions.DeploymentError, - 'ERROR: Protected resource registry overrides detected!', - utils.check_prohibited_overrides, - protected_overrides, [(user_env, user_env)]) - - def test_check_prohibited_overrides_with_no_conflict(self): - protected_overrides = { - 'registry_entries': {'OS::Foo::Bar': ['foo_bar_file']}} - user_env = self.tmp_dir.join('env-file01.yaml') - fake_env = {'parameter_defaults': {'bar_param': {'bar': 'foo'}}, - 'resource_registry': {'OS::Bar::Foo': 'bar'}} - with open(user_env, 'w') as temp_file: - yaml.safe_dump(fake_env, temp_file) - - self.assertIsNone( - utils.check_prohibited_overrides(protected_overrides, - [(user_env, user_env)])) - - def test_check_neutron_resources(self): - resource_registry = { - "a": "A", - "neutron": "OS::Neutron::Port" - } - environment = dict(resource_registry=resource_registry) - self.assertRaises( - exceptions.InvalidConfiguration, - utils.check_neutron_resources, - environment) - resource_registry["neutron"] = "OS::Neutron::Network" - self.assertRaises( - exceptions.InvalidConfiguration, - utils.check_neutron_resources, - environment) - resource_registry.pop("neutron") - self.assertIsNone(utils.check_neutron_resources(environment)) - - -class TestParseContainerImagePrepare(TestCase): - - fake_env = {'parameter_defaults': {'ContainerImagePrepare': - [{'push_destination': 'foo.com', 'set': - {'ceph_image': 'ceph', - 'ceph_namespace': 'quay.io:443/ceph', - 'ceph_tag': 'latest'}}], - 'ContainerImageRegistryCredentials': - {'quay.io:443': {'quay_username': - 'quay_password'}}}} - - def test_parse_container_image_prepare(self): - key = 'ContainerImagePrepare' - keys = ['ceph_namespace', 'ceph_image', 'ceph_tag'] - reg_expected = {'ceph_image': 'ceph', - 'ceph_namespace': 'quay.io:443/ceph', - 'ceph_tag': 'latest'} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_env, cfgfile) - reg_actual = \ - utils.parse_container_image_prepare(key, keys, - cfgfile.name) - self.assertEqual(reg_actual, reg_expected) - - def test_parse_container_image_prepare_push_dest(self): - key = 'ContainerImagePrepare' - keys = ['ceph_namespace', 'ceph_image', 'ceph_tag'] - push_sub_keys = ['ceph_namespace'] - reg_expected = {'ceph_image': 'ceph', - 'ceph_namespace': 'foo.com/ceph', - 'ceph_tag': 'latest', - 'push_destination_boolean': True} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_env, cfgfile) - reg_actual = \ - utils.parse_container_image_prepare(key, keys, - cfgfile.name, - push_sub_keys) - self.assertEqual(reg_actual, reg_expected) - - def test_parse_container_image_prepare_push_dest_no_slash(self): - # Cover case from https://bugs.launchpad.net/tripleo/+bug/1979554 - key = 'ContainerImagePrepare' - keys = ['ceph_namespace', 'ceph_image', 'ceph_tag'] - push_sub_keys = ['ceph_namespace'] - reg_expected = {'ceph_image': 'ceph', - 'ceph_namespace': 'foo.com', - 'ceph_tag': 'latest', - 
'push_destination_boolean': True} - # Deep copy via a YAML round-trip so the shared class attribute - # is not mutated for the other tests in this class - local_fake_env = yaml.safe_load(yaml.safe_dump(self.fake_env)) - # Remove '/ceph' from 'quay.io:443/ceph' in local copy to - # make sure parse_container_image_prepare() can handle it - local_fake_env['parameter_defaults'][ - 'ContainerImagePrepare'][0]['set']['ceph_namespace'] \ - = 'quay.io:443' - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(local_fake_env, cfgfile) - reg_actual = \ - utils.parse_container_image_prepare(key, keys, - cfgfile.name, - push_sub_keys) - self.assertEqual(reg_actual, reg_expected) - - def test_parse_container_image_prepare_credentials(self): - key = 'ContainerImageRegistryCredentials' - keys = ['quay.io:443/ceph'] - reg_expected = {'registry_url': 'quay.io:443', - 'registry_username': 'quay_username', - 'registry_password': 'quay_password'} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_env, cfgfile) - reg_actual = \ - utils.parse_container_image_prepare(key, keys, - cfgfile.name) - self.assertEqual(reg_actual, reg_expected) - - -class TestWorkingDirDefaults(base.TestCase): - - def setUp(self): - super(TestWorkingDirDefaults, self).setUp() - self.working_dir = tempfile.mkdtemp() - self.stack = 'overcloud' - self.wd_roles_file = os.path.join( - self.working_dir, - utils.constants.WD_DEFAULT_ROLES_FILE_NAME.format(self.stack)) - self.wd_networks_file = os.path.join( - self.working_dir, - utils.constants.WD_DEFAULT_NETWORKS_FILE_NAME.format(self.stack)) - self.wd_vip_file = os.path.join( - self.working_dir, - utils.constants.WD_DEFAULT_VIP_FILE_NAME.format(self.stack)) - self.wd_baremetal_file = os.path.join( - self.working_dir, - utils.constants.WD_DEFAULT_BAREMETAL_FILE_NAME.format(self.stack)) - - def tearDown(self): - super(TestWorkingDirDefaults, self).tearDown() - shutil.rmtree(self.working_dir) - - @mock.patch.object(utils, 'rewrite_ansible_playbook_paths', autospec=True) - @mock.patch.object(shutil, 'copy', autospec=True) - def test_update_working_dir_defaults(self, mock_shutil_copy, - mock_rewrite_ansible_playbook_paths): - args = mock.Mock() - args.stack = self.stack - args.templates = '/tht_root' - args.roles_file = '/dir/roles_file.yaml' - args.networks_file = '/dir/networks_file.yaml' - args.vip_file = '/dir/vip_file.yaml' - args.baremetal_deployment = '/dir/baremetal_deployment.yaml' - - utils.update_working_dir_defaults(self.working_dir, args) - - mock_shutil_copy.assert_has_calls( - [mock.call(args.baremetal_deployment, self.wd_baremetal_file), - mock.call(args.roles_file, self.wd_roles_file), - mock.call(args.networks_file, self.wd_networks_file), - mock.call(args.vip_file, self.wd_vip_file)]) - - def test_rewrite_ansible_playbook_paths(self): - src = '/rel/path/baremetal.yaml' - dest = self.wd_baremetal_file - roles = ''' - - name: Controller - ansible_playbooks: - - playbook: controller-playbook.yaml - - playbook: /abs/path/controller-playbook.yaml - - name: Compute - ansible_playbooks: - - playbook: compute-playbook.yaml - - playbook: /abs/path/compute-playbook.yaml - ''' - with open(dest, 'w') as f: - f.write(roles) - utils.rewrite_ansible_playbook_paths(src, dest) - with open(dest, 'r') as f: - data = yaml.safe_load(f.read()) - self.assertEqual(data[0]['ansible_playbooks'][0]['playbook'], - '/rel/path/controller-playbook.yaml') - self.assertEqual(data[0]['ansible_playbooks'][1]['playbook'], - '/abs/path/controller-playbook.yaml') - self.assertEqual(data[1]['ansible_playbooks'][0]['playbook'], - '/rel/path/compute-playbook.yaml') - 
self.assertEqual(data[1]['ansible_playbooks'][1]['playbook'], - '/abs/path/compute-playbook.yaml') - - -class TestGetCephNetworks(TestCase): - - fake_network_data_default = [] - - fake_network_data = [ - {'name': 'StorageCloud0', - 'name_lower': 'storage', - 'ip_subnet': '172.16.1.0/24', - 'ipv6_subnet': 'fd00:fd00:fd00:3000::/64'}, - {'name': 'StorageMgmtCloud0', - 'name_lower': 'storage_mgmt', - 'ip_subnet': '172.16.3.0/24', - 'ipv6_subnet': 'fd00:fd00:fd00:4000::/64'}] - - fake_network_data_subnet = [ - {'name': 'Storage', - 'name_lower': 'storage_cloud_0', - 'service_net_map_replace': 'storage', - 'subnets': - {'storage_cloud_0_subnet_0': - {'ip_subnet': '172.16.11.0/24'}}}, - {'name': 'Storage', - 'name_lower': 'storage_mgmt_cloud_0', - 'service_net_map_replace': 'storage_mgmt', - 'subnets': - {'storage_mgmt_cloud_0_subnet_0': - {'ip_subnet': '172.16.12.0/24'}}}] - - fake_double_subnet = yaml.safe_load(''' - - name: StorageMgmtCloud0 - name_lower: storage_mgmt_cloud_0 - service_net_map_replace: storage_mgmt - subnets: - storage_mgmt_cloud_0_subnet12: - ip_subnet: '172.16.12.0/24' - storage_mgmt_cloud_0_subnet13: - ip_subnet: '172.16.13.0/24' - - name: StorageCloud0 - name_lower: storage_cloud_0 - service_net_map_replace: storage - subnets: - storage_cloud_0_subnet14: - ip_subnet: '172.16.14.0/24' - storage_cloud_0_subnet15: - ip_subnet: '172.16.15.0/24' - ''') - - def test_network_data_default(self): - expected = {'cluster_network': '192.168.24.0/24', - 'cluster_network_name': 'ctlplane', - 'public_network': '192.168.24.0/24', - 'public_network_name': 'ctlplane', - 'ms_bind_ipv4': True, 'ms_bind_ipv6': False} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_network_data_default, cfgfile) - net_name = utils.get_ceph_networks(cfgfile.name, - 'storage', 'storage_mgmt') - self.assertEqual(expected, net_name) - - def test_network_data(self): - expected = {'cluster_network': '172.16.3.0/24', - 'cluster_network_name': 'storage_mgmt', - 'public_network': '172.16.1.0/24', - 'public_network_name': 'storage', - 'ms_bind_ipv4': True, 'ms_bind_ipv6': False} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_network_data, cfgfile) - net_name = utils.get_ceph_networks(cfgfile.name, - 'storage', 'storage_mgmt') - self.assertEqual(expected, net_name) - - def test_network_data_v6(self): - expected = {'cluster_network': 'fd00:fd00:fd00:4000::/64', - 'cluster_network_name': 'storage_mgmt', - 'public_network': 'fd00:fd00:fd00:3000::/64', - 'public_network_name': 'storage', - 'ms_bind_ipv4': False, 'ms_bind_ipv6': True} - # Enable IPv6 on per-test copies so the shared class attribute - # is not mutated for the other tests - networks = [dict(net, ipv6=True) for net in self.fake_network_data] - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(networks, cfgfile) - net_name = utils.get_ceph_networks(cfgfile.name, - 'storage', 'storage_mgmt') - self.assertEqual(expected, net_name) - - def test_network_data_subnets(self): - expected = {'cluster_network': '172.16.12.0/24', - 'cluster_network_name': 'storage_mgmt_cloud_0', - 'public_network': '172.16.11.0/24', - 'public_network_name': 'storage_cloud_0', - 'ms_bind_ipv4': True, 'ms_bind_ipv6': False} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_network_data_subnet, cfgfile) - net_name = utils.get_ceph_networks(cfgfile.name, - 'storage', 'storage_mgmt') - self.assertEqual(expected, net_name) - - def test_network_data_subnets_override_names(self): - expected = {'cluster_network': '172.16.12.0/24', - 'cluster_network_name': 
'storage_mgmt_cloud_0', - 'public_network': '172.16.11.0/24', - 'public_network_name': 'storage_cloud_0', - 'ms_bind_ipv4': True, 'ms_bind_ipv6': False} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_network_data_subnet, cfgfile) - net_name = utils.get_ceph_networks(cfgfile.name, - 'storage_cloud_0', - 'storage_mgmt_cloud_0') - self.assertEqual(expected, net_name) - - def test_network_data_subnets_multiple(self): - expected = {'cluster_network': '172.16.12.0/24,172.16.13.0/24', - 'cluster_network_name': 'storage_mgmt_cloud_0', - 'public_network': '172.16.14.0/24,172.16.15.0/24', - 'public_network_name': 'storage_cloud_0', - 'ms_bind_ipv4': True, 'ms_bind_ipv6': False} - with tempfile.NamedTemporaryFile(mode='w') as cfgfile: - yaml.safe_dump(self.fake_double_subnet, cfgfile) - net_name = utils.get_ceph_networks(cfgfile.name, - 'storage', 'storage_mgmt') - self.assertEqual(expected, net_name) - - -class TestGetHostsFromCephSpec(TestCase): - - specs = [] - specs.append(yaml.safe_load(''' - addr: 192.168.24.13 - hostname: ceph-0 - labels: - - _admin - - mon - - mgr - service_type: host - ''')) - - specs.append(yaml.safe_load(''' - addr: 192.168.24.20 - hostname: ceph-1 - labels: - - _admin - - mon - - mgr - service_type: host - ''')) - - specs.append(yaml.safe_load(''' - addr: 192.168.24.16 - hostname: ceph-2 - labels: - - _admin - - mon - - mgr - service_type: host - ''')) - - specs.append(yaml.safe_load(''' - addr: 192.168.24.14 - hostname: ceph-3 - labels: - - osd - service_type: host - ''')) - - specs.append(yaml.safe_load(''' - addr: 192.168.24.21 - hostname: ceph-4 - labels: - - osd - service_type: host - ''')) - - specs.append(yaml.safe_load(''' - addr: 192.168.24.17 - hostname: ceph-5 - labels: - - osd - service_type: host - ''')) - - specs.append(yaml.safe_load(''' - placement: - hosts: - - ceph-0 - - ceph-1 - - ceph-2 - service_id: mon - service_name: mon - service_type: mon - ''')) - - specs.append(yaml.safe_load(''' - placement: - hosts: - - ceph-0 - - ceph-1 - - ceph-2 - service_id: mgr - service_name: mgr - service_type: mgr - ''')) - - specs.append(yaml.safe_load(''' - data_devices: - all: true - placement: - hosts: - - ceph-3 - - ceph-4 - - ceph-5 - service_id: default_drive_group - service_name: osd.default_drive_group - service_type: osd - ''')) - - def test_get_hosts_from_ceph_spec(self): - expected = {'ceph__admin': ['ceph-0', 'ceph-1', 'ceph-2'], - 'ceph_mon': ['ceph-0', 'ceph-1', 'ceph-2'], - 'ceph_mgr': ['ceph-0', 'ceph-1', 'ceph-2'], - 'ceph_osd': ['ceph-3', 'ceph-4', 'ceph-5'], - 'ceph_non_admin': ['ceph-3', 'ceph-4', 'ceph-5']} - - cfgfile = tempfile.NamedTemporaryFile() - for spec in self.specs: - with open(cfgfile.name, 'a') as f: - f.write('---\n') - f.write(yaml.safe_dump(spec)) - hosts = utils.get_host_groups_from_ceph_spec(cfgfile.name, - prefix='ceph_') - cfgfile.close() - - self.assertEqual(expected, hosts) - - def test_get_addr_from_ceph_spec(self): - expected = {'_admin': ['192.168.24.13', - '192.168.24.20', - '192.168.24.16'], - 'mon': ['192.168.24.13', - '192.168.24.20', - '192.168.24.16'], - 'mgr': ['192.168.24.13', - '192.168.24.20', - '192.168.24.16'], - 'osd': ['192.168.24.14', - '192.168.24.21', - '192.168.24.17']} - - cfgfile = tempfile.NamedTemporaryFile() - for spec in self.specs: - with open(cfgfile.name, 'a') as f: - f.write('---\n') - f.write(yaml.safe_dump(spec)) - hosts = utils.get_host_groups_from_ceph_spec(cfgfile.name, - key='addr', - get_non_admin=False) - cfgfile.close() - - self.assertEqual(expected, 
hosts) - - -class TestProcessCephDaemons(TestCase): - - def test_process_ceph_daemons(self): - - daemon_opt = yaml.safe_load(''' - ceph_nfs: - cephfs_data: manila_data - cephfs_metadata: manila_metadata - ''') - - expected = { - 'tripleo_cephadm_daemon_ceph_nfs': True, - 'cephfs_data': 'manila_data', - 'cephfs_metadata': 'manila_metadata' - } - - # daemon_input = tempfile.NamedTemporaryFile() - with tempfile.NamedTemporaryFile(mode='w') as f: - yaml.safe_dump(daemon_opt, f) - found = utils.process_ceph_daemons(f.name) - - self.assertEqual(found, expected) - - -class TestCheckDeployBackups(TestCase): - - @mock.patch('tripleoclient.utils.LOG') - @mock.patch('prettytable.PrettyTable') - @mock.patch('os.statvfs') - @mock.patch('glob.iglob') - def test_check_deploy_backups( - self, mock_iglob, - mock_statvfs, mock_prettytable, mock_log): - working_dir = '/home/foo/overcloud-deploy/overcloud' - mock_iglob.return_value = ['x', 'y', 'z'] - mock_table = mock.Mock() - mock_prettytable.return_value = mock_table - mock_stat_return1 = mock.Mock() - mock_stat_return2 = mock.Mock() - mock_stat_return3 = mock.Mock() - mock_stat_return1.st_size = 1024 - mock_stat_return2.st_size = 2048 - mock_stat_return3.st_size = 4096 - mock_statvfs_return = mock.Mock() - mock_statvfs.return_value = mock_statvfs_return - mock_statvfs_return.f_frsize = 1024 - mock_statvfs_return.f_blocks = 100 - mock_statvfs_return.f_bfree = 10 - - with mock.patch('os.stat') as mock_stat: - mock_stat.side_effect = [ - mock_stat_return1, - mock_stat_return2, - mock_stat_return3] - utils.check_deploy_backups(working_dir) - - self.assertEqual(3, mock_table.add_row.call_count) - self.assertEqual(1.0, mock_table.add_row.call_args_list[0][0][0][1]) - self.assertEqual(2.0, mock_table.add_row.call_args_list[1][0][0][1]) - self.assertEqual(4.0, mock_table.add_row.call_args_list[2][0][0][1]) - mock_statvfs.assert_called_once_with('z') - self.assertIn( - 'Disk usage 90.00% exceeds 80% percent of disk size', - mock_log.warning.call_args_list[0][0][0]) - - mock_log.reset_mock() - mock_stat_return3.st_size = 81920 - - with mock.patch('os.stat') as mock_stat: - mock_stat.side_effect = [ - mock_stat_return1, - mock_stat_return2, - mock_stat_return3] - utils.check_deploy_backups(working_dir) - self.assertIn( - 'Deploy backup files disk usage 90.00% exceeds 50% percent', - mock_log.warning.call_args_list[0][0][0]) - - -class TestGetCephadmKeys(TestCase): - - def test_get_cephadm_keys(self): - user = 'openstack' - key = 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' - pools = ['foo', 'bar'] - keys = utils.get_tripleo_cephadm_keys(user, - key, - pools) - expected = [ - {'name': 'client.openstack', - 'key': key, - 'mode': '0600', - 'caps': { - 'mgr': 'allow *', - 'mon': 'profile rbd', - 'osd': 'profile rbd pool=foo, profile rbd pool=bar'}}] - - self.assertEqual(keys, expected) diff --git a/tripleoclient/tests/v1/__init__.py b/tripleoclient/tests/v1/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/baremetal/__init__.py b/tripleoclient/tests/v1/baremetal/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/baremetal/fakes.py b/tripleoclient/tests/v1/baremetal/fakes.py deleted file mode 100644 index 1eaa8ae9c..000000000 --- a/tripleoclient/tests/v1/baremetal/fakes.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -import ironic_inspector_client -from osc_lib.tests import utils - - -class FakeInspectorClient(object): - def __init__(self, states=None, data=None): - self.states = states or {} - self.data = data or {} - self.on_introspection = [] - - def introspect(self, uuid): - self.on_introspection.append(uuid) - - def get_status(self, uuid): - try: - return self.states[uuid] - except KeyError: - raise ironic_inspector_client.ClientError(mock.Mock()) - - def get_data(self, uuid): - try: - return self.data[uuid] - except KeyError: - raise ironic_inspector_client.ClientError(mock.Mock()) - - def wait_for_finish(self, uuids): - return {uuid: self.states[uuid] for uuid in uuids} - - -class ClientWrapper(object): - - def __init__(self): - self._instance = None - - -class TestBaremetal(utils.TestCommand): - - def setUp(self): - super(TestBaremetal, self).setUp() - - self.app.client_manager.auth_ref = mock.Mock(auth_token="TOKEN") - self.app.client_manager.baremetal = mock.Mock() - self.app.client_manager.image = mock.Mock() - self.app.client_manager.baremetal_introspection = FakeInspectorClient() - self.app.client_manager._region_name = "Arcadia" - self.app.client_manager.session = mock.Mock() - self.app.client_manager.tripleoclient = ClientWrapper() - - def tearDown(self): - super(TestBaremetal, self).tearDown() - - mock.patch.stopall() diff --git a/tripleoclient/tests/v1/overcloud_backup/__init__.py b/tripleoclient/tests/v1/overcloud_backup/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_backup/test_backup.py b/tripleoclient/tests/v1/overcloud_backup/test_backup.py deleted file mode 100644 index 667018f6e..000000000 --- a/tripleoclient/tests/v1/overcloud_backup/test_backup.py +++ /dev/null @@ -1,580 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - from unittest import mock - - from osc_lib.tests import utils - - from tripleoclient import constants - from tripleoclient.tests import fakes - from tripleoclient.v1 import overcloud_backup - from unittest.mock import call - - -class TestOvercloudBackup(utils.TestCommand): - - def setUp(self): - super(TestOvercloudBackup, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_backup.BackupOvercloud(self.app, app_args) - self.inventory = '/tmp/test_inventory.yaml' - # Just create an empty inventory file for the tests - open(self.inventory, 'w').close() - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_noargs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_create_recover_image', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_init(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--init' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_init_nfs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--init', - 'nfs' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_setup_nfs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-nfs' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - 
@mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_setup_rear(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-rear', - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_setup_rear_ironic(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-ironic', - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-conf-ironic.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_backup_setup_nfs_rear_with_inventory(self, - mock_playbook): - arglist = [ - '--setup-nfs', - '--setup-rear', - '--inventory', - self.inventory - ] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - calls = [call(workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={}), - call(workdir=mock.ANY, - playbook='prepare-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={})] - - mock_playbook.assert_has_calls(calls) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_setup_rear_extra_vars_inline(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-rear', - '--extra-vars', - '{"tripleo_backup_and_restore_nfs_server": "192.168.24.1"}' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - extra_vars_dict = { - 'tripleo_backup_and_restore_nfs_server': '192.168.24.1' - } - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=extra_vars_dict - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_setup_rear_with_extra_vars(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-rear', - '--extra-vars', - '/tmp/test_vars.yaml' - ] - 
verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars='/tmp/test_vars.yaml' - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_inventory(self, mock_playbook): - arglist = [ - '--inventory', - self.inventory - ] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_create_recover_image', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_no_inventory(self, mock_playbook): - arglist = [ - '--inventory', - '/tmp/no_inventory.yaml' - ] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaisesRegex( - RuntimeError, - 'The inventory file', - self.cmd.take_action, - parsed_args) - - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_no_readable_inventory(self, - mock_playbook, - mock_access): - arglist = [ - '--inventory', - self.inventory - ] - verifylist = [] - mock_access.return_value = False - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaisesRegex( - RuntimeError, - 'The inventory file', - self.cmd.take_action, - parsed_args) - - -class TestOvercloudSnapshot(utils.TestCommand): - - def setUp(self): - super(TestOvercloudSnapshot, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_backup.BackupSnapshot(self.app, app_args) - self.inventory = '/tmp/test_inventory.yaml' - # Just create an empty inventory file for the tests - open(self.inventory, 'w').close() - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_snapshot_noargs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-snapshot.yaml', - inventory=parsed_args.inventory, - tags='create_snapshots', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_snapshot_revert_remove(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--remove', - '--revert', - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaisesRegex( - RuntimeError, - '--revert and --remove are mutually 
exclusive', - self.cmd.take_action, - parsed_args) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_snapshot_revert(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--revert', - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-snapshot.yaml', - inventory=parsed_args.inventory, - tags='revert_snapshots', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_snapshot_remove(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--remove', - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-snapshot.yaml', - inventory=parsed_args.inventory, - tags='remove_snapshots', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_snapshot_inventory(self, mock_playbook): - arglist = [ - '--inventory', - self.inventory - ] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-snapshot.yaml', - inventory=parsed_args.inventory, - tags='create_snapshots', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={} - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_snapshot_extra_vars_inline(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--extra-vars', - '{"tripleo_snapshot_revert_var_size": "2G"}' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - extra_vars_dict = { - 'tripleo_snapshot_revert_var_size': '2G' - } - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-snapshot.yaml', - inventory=parsed_args.inventory, - tags='create_snapshots', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=extra_vars_dict - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_no_inventory(self, mock_playbook): - arglist = [ - '--inventory', - '/tmp/no_inventory.yaml' - ] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaisesRegex( - RuntimeError, - 'The inventory file', - self.cmd.take_action, - parsed_args) - - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_backup_no_readable_inventory(self, - mock_playbook, - mock_access): - arglist = [ - '--inventory', - 
self.inventory - ] - verifylist = [] - mock_access.return_value = False - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaisesRegex( - RuntimeError, - 'The inventory file', - self.cmd.take_action, - parsed_args) diff --git a/tripleoclient/tests/v1/overcloud_config/__init__.py b/tripleoclient/tests/v1/overcloud_config/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_deploy/__init__.py b/tripleoclient/tests/v1/overcloud_deploy/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_deploy/fakes.py b/tripleoclient/tests/v1/overcloud_deploy/fakes.py deleted file mode 100644 index 590108d0c..000000000 --- a/tripleoclient/tests/v1/overcloud_deploy/fakes.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from tripleoclient.tests import fakes - - -FAKE_STACK = { - 'parameters': { - 'ControllerCount': 1, - 'ComputeCount': 1, - 'ObjectStorageCount': 0, - 'BlockStorageCount': 0, - 'CephStorageCount': 0, - 'DeployIdentifier': '', - }, - 'stack_name': 'overcloud', - 'stack_status': "CREATE_COMPLETE", - 'outputs': [{ - 'output_key': 'KeystoneURL', - 'output_value': 'http://0.0.0.0:8000', - }, { - 'output_key': 'EndpointMap', - 'output_value': { - 'KeystoneAdmin': { - 'host': '0.0.0.0', - 'uri': 'http://0.0.0.0:35357', - 'port': 35357, - }, - 'KeystoneInternal': { - 'host': '0.0.0.0', - 'uri': 'http://0.0.0.0:5000', - 'port': 5000, - }, - 'KeystonePublic': { - 'host': '0.0.0.0', - 'uri': 'http://0.0.0.0:5000', - 'port': 5000, - }, - 'NovaAdmin': { - 'host': '0.0.0.0', - 'uri': 'http://0.0.0.0:5000', - 'port': 8774, - }, - 'NovaInternal': { - 'host': '0.0.0.0', - 'uri': 'http://0.0.0.0:5000', - 'port': 8774, - }, - 'NovaPublic': { - 'host': '0.0.0.0', - 'uri': 'https://0.0.0.0:8774', - 'port': 8774, - }, - } - }] -} - - -def create_to_dict_mock(**kwargs): - mock_with_to_dict = mock.Mock() - mock_with_to_dict.configure_mock(**kwargs) - mock_with_to_dict.environment.return_value = {} - mock_with_to_dict.to_dict.return_value = kwargs - return mock_with_to_dict - - -def create_tht_stack(**kwargs): - stack = FAKE_STACK.copy() - stack.update(kwargs) - return create_to_dict_mock(**stack) - - -def create_env_with_ntp(**kwargs): - env = { - 'parameter_defaults': { - 'CinderEnableRbdBackend': True, - 'NtpServer': 'ntp.local', - }, - } - env.update(kwargs) - return env - - -def create_env(**kwargs): - env = { - 'parameter_defaults': { - 'CinderEnableRbdBackend': True, - }, - } - env.update(kwargs) - return env - - -class TestDeployOvercloud(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestDeployOvercloud, self).setUp(ansible_mock=False) diff --git a/tripleoclient/tests/v1/overcloud_deploy/test_overcloud_deploy.py b/tripleoclient/tests/v1/overcloud_deploy/test_overcloud_deploy.py deleted file mode 100644 index 5d0984503..000000000 --- 
a/tripleoclient/tests/v1/overcloud_deploy/test_overcloud_deploy.py +++ /dev/null @@ -1,1786 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import fixtures -from io import StringIO -import os -import shutil -import tempfile -import yaml -from unittest import mock - -from osc_lib import exceptions as oscexc -from osc_lib.tests import utils - -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient.tests.fixture_data import deployment -from tripleoclient.tests.v1.overcloud_deploy import fakes -from tripleoclient.v1 import overcloud_deploy - - -class TestDeployOvercloud(fakes.TestDeployOvercloud): - - def setUp(self): - super(TestDeployOvercloud, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_deploy.DeployOvercloud(self.app, app_args) - - self.parameter_defaults_env_file = ( - tempfile.NamedTemporaryFile(mode='w', delete=False).name) - self.tmp_dir = self.useFixture(fixtures.TempDir()) - - # Mock the history command to avoid leaking files - history_patcher = mock.patch('tripleoclient.utils.store_cli_param', - autospec=True) - history_patcher.start() - self.addCleanup(history_patcher.stop) - - self.real_shutil = shutil.rmtree - - self.uuid1_value = "uuid" - mock_uuid1 = mock.patch('uuid.uuid1', return_value=self.uuid1_value, - autospec=True) - mock_uuid1.start() - self.addCleanup(mock_uuid1.stop) - mock_uuid4 = mock.patch('uuid.uuid4', return_value='uuid4', - autospec=True) - mock_uuid4.start() - self.addCleanup(mock_uuid4.stop) - - # Mock time to get predictable DeployIdentifiers - self.time_value = 12345678 - mock_time = mock.patch('time.time', return_value=self.time_value, - autospec=True) - mock_time.start() - self.addCleanup(mock_time.stop) - - # Mock copytree to avoid creating temporary templates - mock_copytree = mock.patch('shutil.copytree', - autospec=True) - mock_copytree.start() - self.addCleanup(mock_copytree.stop) - - # Mock sleep to reduce time of test - mock_sleep = mock.patch('time.sleep', autospec=True) - mock_sleep.start() - self.addCleanup(mock_sleep.stop) - - mock_run_command_and_log = mock.patch( - 'tripleoclient.utils.run_command_and_log', - autospec=True, - return_value=0) - mock_run_command_and_log.start() - self.addCleanup(mock_run_command_and_log.stop) - - mock_run_command = mock.patch( - 'tripleoclient.utils.run_command', - autospec=True, - return_value=0) - mock_run_command.start() - self.addCleanup(mock_run_command.stop) - - # Mock playbook runner - playbook_runner = mock.patch( - 'tripleoclient.utils.run_ansible_playbook', - autospec=True - ) - self.mock_playbook = playbook_runner.start() - self.addCleanup(playbook_runner.stop) - - # Mock role playbooks runner - role_playbooks = mock.patch( - 'tripleoclient.utils.run_role_playbooks', - autospec=True - ) - self.mock_role_playbooks = role_playbooks.start() - self.addCleanup(role_playbooks.stop) - - # Mock horizon url return - horizon_url = mock.patch( - 
'tripleoclient.workflows.deployment.get_horizon_url', - autospec=True - ) - mock_horizon_url = horizon_url.start() - mock_horizon_url.return_value = 'fake://url:12345' - self.addCleanup(horizon_url.stop) - - # Mock copy to working dir - mock_copy_to_wd = mock.patch( - 'tripleoclient.utils.copy_to_wd', autospec=True) - mock_copy_to_wd.start() - self.addCleanup(mock_copy_to_wd.stop) - - mock_check_deploy_backups = mock.patch( - 'tripleoclient.utils.check_deploy_backups', autospec=True) - mock_check_deploy_backups.start() - self.addCleanup(mock_check_deploy_backups.stop) - - def tearDown(self): - super(TestDeployOvercloud, self).tearDown() - os.unlink(self.parameter_defaults_env_file) - shutil.rmtree = self.real_shutil - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_virtual_ips', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_networks', autospec=True) - @mock.patch('tripleoclient.utils.check_service_vips_migrated_to_service') - @mock.patch('tripleoclient.utils.build_stack_data', autospec=True) - @mock.patch('tripleo_common.utils.plan.default_image_params', - autospec=True, return_value={}) - @mock.patch('tripleoclient.utils.get_rc_params', - autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', autospec=True) - @mock.patch('tripleoclient.utils.get_ctlplane_attrs', autospec=True, - return_value={}) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch("heatclient.common.event_utils.get_events") - @mock.patch('tripleo_common.update.add_breakpoints_cleanup_into_env', - autospec=True) - @mock.patch('tripleoclient.utils.create_parameters_env', autospec=True) - @mock.patch('tripleoclient.utils.create_tempest_deployer_input', - autospec=True) - @mock.patch('heatclient.common.template_utils.get_template_contents', - autospec=True) - def test_tht_scale(self, mock_get_template_contents, - mock_create_tempest_deployer_input, - mock_create_parameters_env, - mock_breakpoints_cleanup, - mock_events, - mock_ceph_fsid, mock_swift_rgw, - mock_ceph_ansible, - mock_get_undercloud_host_entry, mock_copy, - mock_get_ctlplane_attrs, - mock_process_env, mock_roles_data, - mock_container_prepare, mock_generate_password, - mock_rc_params, mock_default_image_params, - mock_stack_data, mock_check_service_vip_migr, - mock_provision_networks, mock_provision_virtual_ips): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - mock_stack = fakes.create_tht_stack() - orchestration_client.stacks.get.return_value = mock_stack - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - arglist = ['--templates'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ] - - clients = self.app.client_manager - orchestration_client = clients.orchestration - 
orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - mock_event = mock.Mock() - mock_event.id = '1234' - mock_events.return_value = [mock_event] - mock_roles_data.return_value = [] - clients.network.api.find_attr.return_value = { - "id": "network id" - } - mock_get_template_contents.return_value = [{}, "template"] - mock_stack_data.return_value = {'environment_parameters': {}, - 'heat_resource_tree': {}} - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - baremetal = clients.baremetal - baremetal.node.list.return_value = range(10) - - expected_parameters = { - 'CephClusterFSID': self.uuid1_value, - 'CephStorageCount': 3, - 'ExtraConfig': '{}', - 'HypervisorNeutronPhysicalBridge': 'br-ex', - 'HypervisorNeutronPublicInterface': 'nic1', - 'NeutronDnsmasqOptions': 'dhcp-option-force=26,1400', - 'NeutronFlatNetworks': 'datacentre', - 'NeutronPublicInterface': 'nic1', - 'NtpServer': '', - 'SnmpdReadonlyUserPassword': 'PASSWORD', - 'DeployIdentifier': 12345678, - 'RootStackName': 'overcloud', - 'StackAction': 'CREATE', - 'UndercloudHostsEntries': [ - '192.168.0.1 uc.ctlplane.localhost uc.ctlplane'], - 'CtlplaneNetworkAttributes': {}, - } - - def _custom_create_params_env(parameters, tht_root, - stack): - for key, value in parameters.items(): - self.assertEqual(value, expected_parameters[key]) - parameter_defaults = {"parameter_defaults": parameters} - return parameter_defaults - - mock_create_parameters_env.side_effect = _custom_create_params_env - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - mock_process_env.return_value = {}, { - 'parameter_defaults': expected_parameters} - self.cmd.take_action(parsed_args) - - mock_get_template_contents.assert_called_with( - template_file=mock.ANY) - - mock_create_tempest_deployer_input.assert_called_with( - output_dir=self.cmd.working_dir) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_virtual_ips', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_networks', autospec=True) - @mock.patch('tripleoclient.utils.build_stack_data', autospec=True) - @mock.patch('tripleo_common.utils.plan.default_image_params', - return_value={}) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', autospec=True) - @mock.patch('tripleoclient.utils.get_ctlplane_attrs', autospec=True, - return_value={}) - @mock.patch('tripleoclient.workflows.deployment.create_overcloudrc', - autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- '_deploy_postconfig', autospec=True) - @mock.patch('tripleo_common.update.add_breakpoints_cleanup_into_env', - autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy._validate_vip_file') - @mock.patch('tripleoclient.v1.overcloud_deploy._validate_args') - @mock.patch('heatclient.common.template_utils.get_template_contents', - autospec=True) - @mock.patch('os.chmod', autospec=True) - @mock.patch('os.chdir', autospec=True) - @mock.patch('tempfile.mkdtemp', autospec=True) - @mock.patch('tripleoclient.utils.makedirs') - def test_tht_deploy(self, mock_md, mock_tmpdir, mock_cd, mock_chmod, - mock_get_template_contents, mock_validate_args, - mock_validate_vip_file, - mock_breakpoints_cleanup, mock_postconfig, - mock_get_undercloud_host_entry, - mock_copy, mock_overcloudrc, - mock_get_ctlplane_attrs, - mock_process_env, mock_roles_data, - mock_container_prepare, mock_generate_password, - mock_rc_params, mock_default_image_params, - mock_stack_data, mock_provision_networks, - mock_provision_virtual_ips): - mock_tmpdir.return_value = self.tmp_dir.path - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - utils_overcloud_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_overcloud_fixture) - arglist = ['--templates', '--no-cleanup'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ] - mock_stack_data.return_value = {'environment_parameters': {}, - 'heat_resource_tree': {}} - mock_tmpdir.return_value = self.tmp_dir.path - - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - clients.network.api.find_attr.return_value = { - "id": "network id" - } - mock_get_template_contents.return_value = [{}, "template"] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - baremetal = clients.baremetal - baremetal.node.list.return_value = range(10) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - parameters_env = { - 'parameter_defaults': { - 'StackAction': 'CREATE', - 'DeployIdentifier': 12345678, - 'RootStackName': 'overcloud', - 'UndercloudHostsEntries': - ['192.168.0.1 uc.ctlplane.localhost uc.ctlplane'], - 'CtlplaneNetworkAttributes': {}}} - mock_process_env.return_value = {}, parameters_env - mock_open_context = mock.mock_open() - with mock.patch('builtins.open', mock_open_context): - self.cmd.take_action(parsed_args) - - mock_get_template_contents.assert_called_with( - template_file=mock.ANY) - - utils_overcloud_fixture.mock_deploy_tht.assert_called_with( - output_dir=self.cmd.working_dir) - - mock_validate_args.assert_called_once_with(parsed_args) - mock_validate_vip_file.assert_not_called() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_virtual_ips', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- '_provision_networks', autospec=True) - @mock.patch('tripleoclient.utils.build_stack_data', autospec=True) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', autospec=True) - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_deploy_postconfig', autospec=True) - @mock.patch('tripleo_common.update.add_breakpoints_cleanup_into_env', - autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy._validate_vip_file') - @mock.patch('tripleoclient.v1.overcloud_deploy._validate_args') - @mock.patch('tripleoclient.utils.create_parameters_env', autospec=True) - @mock.patch('heatclient.common.template_utils.get_template_contents', - autospec=True) - @mock.patch('shutil.rmtree', autospec=True) - @mock.patch('tempfile.mkdtemp', autospec=True) - def test_tht_deploy_skip_deploy_identifier( - self, mock_tmpdir, mock_rm, - mock_get_template_contents, - mock_create_parameters_env, mock_validate_args, - mock_validate_vip_file, - mock_breakpoints_cleanup, - mock_postconfig, - mock_ceph_fsid, mock_swift_rgw, mock_ceph_ansible, - mock_get_undercloud_host_entry, mock_copy, - mock_chdir, - mock_process_env, mock_roles_data, - mock_image_prepare, mock_generate_password, - mock_rc_params, mock_stack_data, - mock_provision_networks, mock_provision_virtual_ips): - mock_tmpdir.return_value = self.tmp_dir.path - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - utils_overcloud_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_overcloud_fixture) - - arglist = ['--templates', '--skip-deploy-identifier'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('skip_deploy_identifier', True) - ] - mock_stack_data.return_value = {'environment_parameters': {}, - 'heat_resource_tree': {}} - mock_tmpdir.return_value = "/tmp/tht" - - clients = self.app.client_manager - orchestration_client = clients.orchestration - mock_stack = fakes.create_tht_stack() - orchestration_client.stacks.get.side_effect = [mock_stack] - utils_fixture.mock_launch_heat.return_value = orchestration_client - - def _orch_clt_create(**kwargs): - orchestration_client.stacks.get.return_value = mock_stack - - orchestration_client.stacks.create.side_effect = _orch_clt_create - - clients.network.api.find_attr.return_value = { - "id": "network id" - } - mock_get_template_contents.return_value = [{}, "template"] - - mock_env = {} - mock_env['parameter_defaults'] = {} - mock_env['parameter_defaults']['ContainerHeatApiImage'] = \ - 'container-heat-api-image' - mock_env['parameter_defaults']['ContainerHeatEngineImage'] = \ - 'container-heat-engine-image' - mock_process_env.return_value = {}, mock_env - parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) - - baremetal = clients.baremetal - baremetal.node.list.return_value = range(10) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - self.cmd.take_action(parsed_args) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_virtual_ips', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_networks', autospec=True) - @mock.patch('tripleoclient.utils.check_service_vips_migrated_to_service') - @mock.patch('tripleoclient.utils.build_stack_data', autospec=True) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch("heatclient.common.event_utils.get_events", autospec=True) - @mock.patch('tripleo_common.update.add_breakpoints_cleanup_into_env', - autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_deploy_postconfig', autospec=True) - @mock.patch('tripleoclient.utils.create_tempest_deployer_input', - autospec=True) - @mock.patch('heatclient.common.template_utils.get_template_contents', - autospec=True) - def test_deploy_custom_templates(self, mock_get_template_contents, - mock_create_tempest_deployer_input, - mock_deploy_postconfig, - mock_breakpoints_cleanup, - mock_events, - mock_ceph_fsid, mock_swift_rgw, - mock_ceph_ansible, - mock_get_undercloud_host_entry, - mock_copy, - mock_process_env, - mock_roles_data, - mock_image_prepare, - mock_generate_password, - mock_rc_params, - mock_stack_data, - mock_check_service_vip_migr, - mock_provision_networks, - mock_provision_virtual_ips): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - arglist = ['--templates', '/home/stack/tripleo-heat-templates'] - verifylist = [ - ('templates', '/home/stack/tripleo-heat-templates'), - ] - - mock_events.return_value = [] - - clients.network.api.find_attr.return_value = { - "id": "network id" - } - mock_get_template_contents.return_value = [{}, "template"] - mock_stack_data.return_value = {'environment_parameters': {}, - 'heat_resource_tree': {}} - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - baremetal = clients.baremetal - baremetal.node.list.return_value = range(10) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - env = {'parameter_defaults': {}, - 'resource_registry': {}} - mock_process_env.return_value = {}, env - with mock.patch('tempfile.mkstemp') as 
mkstemp: - mkstemp.return_value = (os.open(self.parameter_defaults_env_file, - os.O_RDWR), - self.parameter_defaults_env_file) - self.cmd.take_action(parsed_args) - - mock_get_template_contents.assert_called_with( - template_file=mock.ANY) - - mock_create_tempest_deployer_input.assert_called_with( - output_dir=self.cmd.working_dir) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'deploy_tripleo_heat_templates', autospec=True) - def test_jinja2_env_path(self, mock_deploy_tht, mock_create_env): - - arglist = ['--templates', '-e', 'bad_path.j2.yaml', '-e', 'other.yaml', - '-e', 'bad_path2.j2.yaml'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('environment_files', ['bad_path.j2.yaml', 'other.yaml', - 'bad_path2.j2.yaml']) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises( - oscexc.CommandError, - self.cmd.take_action, parsed_args) - self.assertFalse(mock_deploy_tht.called) - - @mock.patch('tripleoclient.utils.check_service_vips_migrated_to_service') - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.process_multiple_environments', - autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_deploy_postconfig', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_update_parameters', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- '_heat_deploy', autospec=True) - def test_environment_dirs(self, mock_deploy_heat, mock_create_env, - mock_update_parameters, mock_post_config, - mock_ceph_fsid, - mock_swift_rgw, mock_ceph_ansible, - mock_copy, - mock_process_env, mock_rc_params, - mock_check_service_vip_migr): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_overcloud_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_overcloud_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - utils_overcloud_fixture.mock_utils_endpoint.return_value = 'foo.bar' - mock_update_parameters.return_value = {} - - test_env = os.path.join(self.tmp_dir.path, 'foo1.yaml') - - env_dirs = [os.path.join(os.environ.get('HOME', ''), '.tripleo', - 'environments'), self.tmp_dir.path] - - env = {'parameter_defaults': {}, - 'resource_registry': { - 'Test': 'OS::Heat::None', - 'resources': {'*': {'*': { - 'UpdateDeployment': {'hooks': []}}}}}} - env['parameter_defaults']['ContainerHeatApiImage'] = \ - 'container-heat-api-image' - env['parameter_defaults']['ContainerHeatEngineImage'] = \ - 'container-heat-engine-image' - - mock_process_env.return_value = {}, env - with open(test_env, 'w') as temp_file: - temp_file.write('resource_registry:\n Test: OS::Heat::None') - - arglist = ['--templates', '--environment-directory', self.tmp_dir.path] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('environment_directories', env_dirs), - ] - - def assertEqual(*args): - self.assertEqual(*args) - - def _fake_heat_deploy(self, stack_name, template_path, - environments, timeout, tht_root, - env, run_validations, - roles_file, - env_files_tracker=None, - deployment_options=None): - assertEqual( - {'parameter_defaults': { - 'ContainerHeatApiImage': 'container-heat-api-image', - 'ContainerHeatEngineImage': 'container-heat-engine-image', - }, - 'resource_registry': { - 'Test': 'OS::Heat::None', - 'resources': {'*': {'*': { - 'UpdateDeployment': {'hooks': []}}}}}}, env) - - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - mock_deploy_heat.side_effect = _fake_heat_deploy - mock_create_env.return_value = {} - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_deploy_postconfig', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_update_parameters', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- '_heat_deploy', autospec=True) - def test_environment_dirs_env_dir_not_found(self, mock_deploy_heat, - mock_update_parameters, - mock_post_config): - utils_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_fixture) - - mock_update_parameters.return_value = {} - utils_fixture.mock_utils_endpoint.return_value = 'foo.bar' - os.mkdir(self.tmp_dir.join('env')) - os.mkdir(self.tmp_dir.join('common')) - - arglist = ['--templates', '--environment-directory', '/tmp/notthere'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - error = self.assertRaises(oscexc.CommandError, self.cmd.take_action, - parsed_args) - self.assertIn('/tmp/notthere', str(error)) - - def test_validate_args_missing_environment_files(self): - arglist = ['--templates', - '-e', 'nonexistent.yaml'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('environment_files', ['nonexistent.yaml']), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(oscexc.CommandError, - overcloud_deploy._validate_args, - parsed_args) - - @mock.patch('os.path.isfile', autospec=True) - def test_validate_args_missing_rendered_files(self, mock_isfile): - tht_path = '/usr/share/openstack-tripleo-heat-templates/' - env_path = os.path.join(tht_path, 'noexist.yaml') - arglist = ['--templates', - '-e', env_path] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('environment_files', [env_path]), - ] - - mock_isfile.side_effect = [False, True] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - overcloud_deploy._validate_args(parsed_args) - calls = [mock.call(env_path), - mock.call(env_path.replace(".yaml", ".j2.yaml"))] - mock_isfile.assert_has_calls(calls) - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_heat_deploy', autospec=True) - def test_try_overcloud_deploy_with_first_template_existing( - self, mock_heat_deploy_func): - result = self.cmd._try_overcloud_deploy_with_compat_yaml( - '/fake/path', 'overcloud', ['~/overcloud-env.json'], 1, - {}, False, None, None) - # If it returns None it succeeded - self.assertIsNone(result) - mock_heat_deploy_func.assert_called_once_with( - self.cmd, 'overcloud', - '/fake/path/' + constants.OVERCLOUD_YAML_NAME, - ['~/overcloud-env.json'], 1, '/fake/path', {}, False, - None, deployment_options=None, env_files_tracker=None) - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_heat_deploy', autospec=True) - def test_try_overcloud_deploy_with_no_templates_existing( - self, mock_heat_deploy_func): - mock_heat_deploy_func.side_effect = oscexc.CommandError('error') - self.assertRaises(ValueError, - self.cmd._try_overcloud_deploy_with_compat_yaml, - '/fake/path', mock.ANY, - mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, - None) - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- '_heat_deploy', autospec=True) - def test_try_overcloud_deploy_show_missing_file( - self, mock_heat_deploy_func): - mock_heat_deploy_func.side_effect = \ - oscexc.CommandError('/fake/path not found') - value_error = self.assertRaises( - ValueError, - self.cmd._try_overcloud_deploy_with_compat_yaml, - '/fake/path', mock.ANY, - mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, - None) - self.assertIn('/fake/path', str(value_error)) - - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'deploy_tripleo_heat_templates', autospec=True) - def test_dry_run(self, mock_deploy, mock_create_env, - mock_get_undercloud_host_entry): - utils_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - mock_stack = fakes.create_tht_stack() - orchestration_client.stacks.get.return_value = mock_stack - arglist = ['--templates', '--dry-run'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('dry_run', True), - ] - - mock_create_env.return_value = ({}, []) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - self.assertFalse(utils_fixture.mock_deploy_tht.called) - self.assertFalse(mock_deploy.called) - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_virtual_ips', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_networks', autospec=True) - @mock.patch('tripleoclient.utils.check_service_vips_migrated_to_service') - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.'
- '_heat_deploy', autospec=True) - @mock.patch('tempfile.mkdtemp', autospec=True) - @mock.patch('shutil.rmtree', autospec=True) - def test_answers_file(self, mock_rmtree, mock_tmpdir, - mock_heat_deploy, - mock_ceph_fsid, mock_swift_rgw, - mock_ceph_ansible, - mock_get_undercloud_host_entry, - mock_copy, - mock_roles_data, mock_image_prepare, - mock_generate_password, mock_rc_params, - mock_check_service_vip_migr, - mock_provision_networks, - mock_provision_virtual_ips): - mock_tmpdir.return_value = self.tmp_dir.path - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - mock_stack = fakes.create_tht_stack() - orchestration_client.stacks.get.return_value = mock_stack - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - utils_fixture.mock_launch_heat.return_value = orchestration_client - - clients = self.app.client_manager - - mock_tmpdir.return_value = self.tmp_dir.path - mock_rmtree.return_value = None - network_client = clients.network - network_client.stacks.get.return_value = None - net = network_client.api.find_attr('networks', 'ctlplane') - net.configure_mock(__getitem__=lambda x, y: 'testnet') - - test_env = self.tmp_dir.join('foo1.yaml') - with open(test_env, 'w') as temp_file: - temp_file.write('resource_registry:\n Test: OS::Heat::None') - - test_env2 = self.tmp_dir.join('foo2.yaml') - with open(test_env2, 'w') as temp_file: - temp_file.write('resource_registry:\n Test2: OS::Heat::None') - - os.makedirs(self.tmp_dir.join('tripleo-heat-templates')) - reg_file = self.tmp_dir.join( - 'tripleo-heat-templates/overcloud-resource-registry-puppet.yaml') - - with open(reg_file, 'w+') as temp_file: - temp_file.write('resource_registry:\n Test2: OS::Heat::None') - - test_answerfile = self.tmp_dir.join('answerfile') - with open(test_answerfile, 'w') as answerfile: - yaml.dump( - {'templates': - '/usr/share/openstack-tripleo-heat-templates/', - 'environments': [test_env] - }, - answerfile - ) - - arglist = ['--answers-file', test_answerfile, - '--environment-file', test_env2, - '--disable-password-generation', - '--working-dir', self.tmp_dir.path] - verifylist = [ - ('answers_file', test_answerfile), - ('environment_files', [test_env2]), - ('disable_password_generation', True)] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - self.cmd.take_action(parsed_args) - - self.assertTrue(mock_heat_deploy.called) - - # Check that Heat was called with correct parameters: - call_args = mock_heat_deploy.call_args[0] - self.assertEqual(call_args[2], - self.tmp_dir.join( - 'tripleo-heat-templates/overcloud.yaml')) - self.assertEqual(call_args[5], - self.tmp_dir.join('tripleo-heat-templates')) - self.assertIn('Test', call_args[6]['resource_registry']) - self.assertIn('Test2', call_args[6]['resource_registry']) - - utils_oc_fixture.mock_deploy_tht.assert_called_with( - output_dir=self.cmd.working_dir) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_provision_virtual_ips', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- '_provision_networks', autospec=True) - @mock.patch('tripleoclient.utils.build_stack_data', autospec=True) - @mock.patch('tripleo_common.utils.plan.default_image_params', - autospec=True, - return_value=dict( - ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image')) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('tripleoclient.utils.get_ctlplane_attrs', autospec=True, - return_value={}) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_deploy_postconfig', autospec=True) - @mock.patch('tripleo_common.update.add_breakpoints_cleanup_into_env') - @mock.patch('tripleoclient.v1.overcloud_deploy._validate_vip_file') - @mock.patch('tripleoclient.v1.overcloud_deploy._validate_args') - @mock.patch('tripleoclient.utils.create_parameters_env', autospec=True) - @mock.patch('tripleoclient.utils.create_tempest_deployer_input', - autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', autospec=True) - @mock.patch('heatclient.common.template_utils.get_template_contents', - autospec=True) - def test_tht_deploy_with_ntp(self, mock_get_template_contents, - mock_process_env, - mock_create_tempest_deployer_input, - mock_create_parameters_env, - mock_validate_args, - mock_validate_vip_file, - mock_breakpoints_cleanup, - mock_deploy_post_config, - mock_ceph_fsid, mock_swift_rgw, - mock_ceph_ansible, - mock_get_undercloud_host_entry, mock_copy, - mock_get_ctlplane_attrs, - mock_roles_data, - mock_image_prepare, - mock_generate_password, - mock_rc_params, - mock_default_image_params, - mock_stack_data, - mock_provision_networks, - mock_provision_virtual_ips): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - - mock_stack_data.return_value = {'environment_parameters': {}, - 'heat_resource_tree': {}} - arglist = ['--templates', '--ntp-server', 'ntp'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ] - - clients = self.app.client_manager - orchestration_client = clients.orchestration - mock_stack = fakes.create_tht_stack() - orchestration_client.stacks.get.side_effect = [ - mock_stack - ] - utils_fixture.mock_launch_heat.return_value = orchestration_client - - def _orch_clt_create(**kwargs): - orchestration_client.stacks.get.return_value = mock_stack - - orchestration_client.stacks.create.side_effect = _orch_clt_create - - clients.network.api.find_attr.return_value = { - "id": "network id" - } - mock_env = fakes.create_env_with_ntp() - mock_env['parameter_defaults']['ContainerHeatApiImage'] = \ - 'container-heat-api-image' - mock_env['parameter_defaults']['ContainerHeatEngineImage'] = \ - 'container-heat-engine-image' - mock_process_env.return_value = [{}, mock_env] - 
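# heatclient's process_environment_and_files normally reads the - # environment files from disk and returns a (files, env) pair; stubbing - # its return value feeds the prepared NTP environment straight to the - # command under test without touching the filesystem. - 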
mock_get_template_contents.return_value = [{}, "template"] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - baremetal = clients.baremetal - baremetal.node.list.return_value = range(10) - - expected_parameters = { - 'CephClusterFSID': self.uuid1_value, - 'ExtraConfig': '{}', - 'HypervisorNeutronPhysicalBridge': 'br-ex', - 'HypervisorNeutronPublicInterface': 'nic1', - 'NeutronDnsmasqOptions': 'dhcp-option-force=26,1400', - 'NeutronFlatNetworks': 'datacentre', - 'NeutronNetworkType': 'gre', - 'NeutronPublicInterface': 'nic1', - 'NeutronTunnelTypes': 'gre', - 'SnmpdReadonlyUserPassword': 'PASSWORD', - 'StackAction': 'CREATE', - 'DeployIdentifier': 12345678, - 'RootStackName': 'overcloud', - 'NtpServer': 'ntp', - 'UndercloudHostsEntries': [ - '192.168.0.1 uc.ctlplane.localhost uc.ctlplane' - ], - 'CtlplaneNetworkAttributes': {}, - 'ContainerHeatApiImage': 'container-heat-api-image', - 'ContainerHeatEngineImage': 'container-heat-engine-image', - } - - def _custom_create_params_env(parameters, tht_root, - stack): - for key, value in parameters.items(): - self.assertEqual(value, expected_parameters[key]) - parameter_defaults = {"parameter_defaults": parameters} - return parameter_defaults - - mock_create_parameters_env.side_effect = _custom_create_params_env - - self.cmd.take_action(parsed_args) - - mock_get_template_contents.assert_called_with( - template_file=mock.ANY) - - mock_create_tempest_deployer_input.assert_called_with( - output_dir=self.cmd.working_dir) - - mock_validate_args.assert_called_once_with(parsed_args) - mock_validate_vip_file.assert_not_called() - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- 'deploy_tripleo_heat_templates', autospec=True) - def test_deployed_server(self, mock_deploy, mock_create_env, - mock_get_undercloud_host_entry, - mock_copy, mock_rc_params): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - arglist = ['--templates', '--disable-validations'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('disable_validations', True), - ] - - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - clients = self.app.client_manager - clients.baremetal = mock.Mock() - clients.compute = mock.Mock() - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - mock_create_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - self.assertTrue(mock_deploy.called) - self.assertNotCalled(clients.baremetal) - self.assertNotCalled(clients.compute) - self.assertTrue(utils_oc_fixture.mock_deploy_tht.called) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- 'deploy_tripleo_heat_templates', autospec=True) - def test_config_download( - self, mock_deploy, mock_create_env, - mock_get_undercloud_host_entry, - mock_copy, mock_rc_params): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - - arglist = ['--templates', '--config-download'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('config_download', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - mock_create_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - self.cmd.take_action(parsed_args) - self.assertTrue(mock_deploy.called) - self.assertTrue(fixture.mock_get_hosts_and_enable_ssh_admin.called) - self.assertTrue(fixture.mock_config_download.called) - self.assertTrue(fixture.mock_set_deployment_status.called) - self.assertEqual( - 'DEPLOY_SUCCESS', - fixture.mock_set_deployment_status.call_args[-1]['status'] - ) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'deploy_tripleo_heat_templates', autospec=True) - def test_config_download_setup_only( - self, mock_deploy, mock_create_env, - mock_get_undercloud_host_entry, - mock_copy, mock_rc_params): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - mock_create_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - - arglist = ['--templates', '--config-download', '--setup-only'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('config_download', True), - ('setup_only', True) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - self.cmd.take_action(parsed_args) - self.assertTrue(fixture.mock_get_hosts_and_enable_ssh_admin.called) - self.assertTrue(fixture.mock_set_deployment_status.called) - self.assertEqual( - 'DEPLOY_SUCCESS', - fixture.mock_set_deployment_status.call_args[-1]['status'] - ) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.'
- 'create_env_files', autospec=True) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'deploy_tripleo_heat_templates', autospec=True) - def test_config_download_only( - self, mock_deploy, - mock_get_undercloud_host_entry, - mock_copy, mock_rc_params, - mock_create_parameters_env): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - mock_create_parameters_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - - arglist = ['--templates', '--config-download-only'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('config_download_only', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - self.cmd.take_action(parsed_args) - self.assertFalse(mock_deploy.called) - self.assertFalse(fixture.mock_get_hosts_and_enable_ssh_admin.called) - self.assertTrue(fixture.mock_config_download.called) - self.assertTrue(fixture.mock_set_deployment_status.called) - self.assertEqual( - 'DEPLOY_SUCCESS', - fixture.mock_set_deployment_status.call_args[-1]['status']) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.utils.create_tempest_deployer_input', - autospec=True) - @mock.patch('tripleoclient.utils.get_overcloud_endpoint', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- 'deploy_tripleo_heat_templates', autospec=True) - def test_config_download_fails( - self, mock_deploy, - mock_overcloud_endpoint, - mock_create_tempest_deployer_input, - mock_get_undercloud_host_entry, - mock_copy, mock_rc_params, - mock_create_parameters_env): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - mock_create_parameters_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - - arglist = ['--templates', '--config-download-only'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('config_download_only', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - fixture.mock_config_download.side_effect = \ - exceptions.DeploymentError('fails') - self.assertRaises( - exceptions.DeploymentError, - self.cmd.take_action, - parsed_args) - self.assertFalse(mock_deploy.called) - self.assertTrue(fixture.mock_config_download.called) - self.assertTrue(fixture.mock_set_deployment_status.called) - self.assertEqual( - 'DEPLOY_FAILED', - fixture.mock_set_deployment_status.call_args[-1]['status']) - - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'deploy_tripleo_heat_templates', autospec=True) - def test_override_ansible_cfg( - self, mock_deploy, mock_create_env, - mock_get_undercloud_host_entry, - mock_copy, mock_rc_params): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - - arglist = ['--templates', - '--override-ansible-cfg', 'ansible.cfg'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('override_ansible_cfg', 'ansible.cfg') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - mock_create_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - self.cmd.take_action(parsed_args) - self.assertTrue(fixture.mock_get_hosts_and_enable_ssh_admin.called) - self.assertTrue(fixture.mock_config_download.called) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- 'create_env_files', autospec=True) - @mock.patch('tripleoclient.utils.check_service_vips_migrated_to_service') - @mock.patch('tripleo_common.utils.plan.default_image_params', - autospec=True) - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleo_common.utils.plan.generate_passwords', - return_value={}) - @mock.patch( - 'tripleo_common.image.kolla_builder.container_images_prepare_multi', - return_value={}) - @mock.patch('tripleoclient.utils.get_roles_data', - autospec=True, return_value={}) - @mock.patch('tripleoclient.utils.process_multiple_environments', - autospec=True) - @mock.patch('tripleoclient.utils.get_ctlplane_attrs', autospec=True, - return_value={}) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_heat_deploy', autospec=True) - @mock.patch('tripleoclient.utils.check_ceph_fsid_matches_env_files') - @mock.patch('tripleoclient.utils.check_swift_and_rgw') - @mock.patch('tripleoclient.utils.check_ceph_ansible') - @mock.patch('heatclient.common.template_utils.deep_update', autospec=True) - def test_config_download_timeout( - self, mock_hc, - mock_ceph_fsid, mock_swift_rgw, mock_ceph_ansible, - mock_hd, mock_get_undercloud_host_entry, mock_copy, - mock_get_ctlplane_attrs, - mock_process_env, mock_roles_data, - mock_container_prepare, mock_generate_password, - mock_rc_params, mock_default_image_params, - mock_check_service_vip_migr, - mock_create_parameters_env): - fixture = deployment.DeploymentWorkflowFixture() - self.useFixture(fixture) - utils_oc_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_oc_fixture) - utils_fixture = deployment.UtilsFixture() - self.useFixture(utils_fixture) - clients = self.app.client_manager - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = fakes.create_tht_stack() - utils_fixture.mock_launch_heat.return_value = orchestration_client - mock_create_parameters_env.return_value = [] - - arglist = ['--templates', '--overcloud-ssh-port-timeout', '42', - '--timeout', '451'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('overcloud_ssh_port_timeout', 42), ('timeout', 451) - ] - - mock_env = {} - mock_env['parameter_defaults'] = {} - mock_env['parameter_defaults']['ContainerHeatApiImage'] = \ - 'container-heat-api-image' - mock_env['parameter_defaults']['ContainerHeatEngineImage'] = \ - 'container-heat-engine-image' - mock_process_env.return_value = {}, mock_env - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - # assume the heat deploy consumed 3m of the total 451m timeout - with mock.patch('time.time', side_effect=[1585820346, - 0, 12345678, 0, - 1585820526, - 1585820526, - 0, 0, 0, 0, 0]): - self.cmd.take_action(parsed_args) - self.assertIn([ - mock.call( - mock.ANY, 'overcloud', mock.ANY, - mock.ANY, 451, mock.ANY, - {'parameter_defaults': { - 'ContainerHeatApiImage': 'container-heat-api-image', - 'ContainerHeatEngineImage': - 'container-heat-engine-image'}}, - False, None, - env_files_tracker=mock.ANY, - deployment_options={})], - mock_hd.mock_calls) - self.assertIn( - [mock.call(mock.ANY, mock.ANY, mock.ANY, 'ctlplane', - os.path.join( - self.cmd.working_dir, - 'config-download'), - None, - 
deployment_options={}, - deployment_timeout=448, # 451 - 3, total time left - in_flight_validations=False, limit_hosts=None, - skip_tags=None, tags=None, timeout=42, - verbosity=3, forks=None, denyed_hostnames=None)], - fixture.mock_config_download.mock_calls) - fixture.mock_config_download.assert_called() - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.workflows.deployment.create_overcloudrc', - autospec=True) - @mock.patch('tripleoclient.workflows.deployment.make_config_download_dir') - @mock.patch('tripleoclient.utils.get_rc_params', autospec=True) - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - '_update_parameters') - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'create_env_files', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'deploy_tripleo_heat_templates', autospec=True) - def test_config_download_only_timeout( - self, mock_deploy, mock_create_env, - mock_get_undercloud_host_entry, mock_update, - mock_copyi, mock_rc_params, mock_cd_dir, - mock_create_overcloudrc): - utils_fixture = deployment.UtilsOvercloudFixture() - self.useFixture(utils_fixture) - utils_fixture2 = deployment.UtilsFixture() - self.useFixture(utils_fixture2) - clients = self.app.client_manager - stack = fakes.create_tht_stack() - stack.stack_name = 'overcloud' - stack.output_show.return_value = {'output': {'output_value': []}} - orchestration_client = clients.orchestration - orchestration_client.stacks.get.return_value = stack - utils_fixture2.mock_launch_heat.return_value = orchestration_client - - arglist = ['--templates', '--config-download-only', - '--overcloud-ssh-port-timeout', '42', - '--config-download-timeout', '240'] - verifylist = [ - ('templates', '/usr/share/openstack-tripleo-heat-templates/'), - ('config_download_only', True), - ('config_download_timeout', 240), - ('overcloud_ssh_port_timeout', 42) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - mock_rc_params.return_value = {'password': 'password', - 'region': 'region1'} - - mock_create_env.return_value = ( - dict(ContainerHeatApiImage='container-heat-api-image', - ContainerHeatEngineImage='container-heat-engine-image'), - []) - self.cmd.take_action(parsed_args) - playbook = os.path.join(os.environ.get( - 'HOME'), self.cmd.working_dir, - 'config-download/overcloud/deploy_steps_playbook.yaml') - self.assertIn( - [mock.call( - ansible_cfg=None, ansible_timeout=42, - extra_env_variables={'ANSIBLE_BECOME': True}, extra_vars=None, - inventory=mock.ANY, key=mock.ANY, limit_hosts=None, - playbook=playbook, playbook_dir=mock.ANY, - reproduce_command=True, skip_tags='opendev-validation', - ssh_user='tripleo-admin', tags=None, - timeout=240, - verbosity=3, workdir=mock.ANY, forks=None)], - utils_fixture2.mock_run_ansible_playbook.mock_calls) - - def test_provision_baremetal(self): - self.cmd.working_dir = self.tmp_dir.join('working_dir') - os.mkdir(self.cmd.working_dir) - bm_deploy_path = os.path.join( - self.cmd.working_dir, - 'tripleo-overcloud-baremetal-deployment.yaml') - baremetal_deployed = { - 'parameter_defaults': {'foo': 'bar'} - } - - deploy_data = [ - {'name': 'Compute', 'count': 10}, - {'name': 'Controller', 'count': 3}, - ] - with open(bm_deploy_path, 'w') as temp_file: - yaml.safe_dump(deploy_data, temp_file) - - ssh_key_path = 
self.tmp_dir.join('id_rsa.pub') - with open(ssh_key_path, 'w') as temp_file: - temp_file.write('sekrit') - - with open('{}.pub'.format(ssh_key_path), 'w') as f: - f.write('sekrit') - - arglist = [ - '--baremetal-deployment', bm_deploy_path, - '--overcloud-ssh-key', ssh_key_path, - '--templates', constants.TRIPLEO_HEAT_TEMPLATES, - ] - verifylist = [ - ('baremetal_deployment', bm_deploy_path), - ('overcloud_ssh_key', ssh_key_path), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - tht_root = self.tmp_dir.join('tht') - env_dir = os.path.join(tht_root, 'user-environments') - env_path = os.path.join(env_dir, 'baremetal-deployed.yaml') - os.makedirs(env_dir) - with open(env_path, 'w') as f: - yaml.safe_dump(baremetal_deployed, f) - - protected_overrides = {'registry_entries': dict(), - 'parameter_entries': dict()} - - self.cmd.working_dir = self.tmp_dir.join('working_dir') - result = self.cmd._provision_baremetal(parsed_args, tht_root, - protected_overrides) - self.cmd._unprovision_baremetal(parsed_args) - self.assertEqual([env_path], result) - self.mock_playbook.assert_has_calls([ - mock.call( - extra_vars={ - 'stack_name': 'overcloud', - 'baremetal_deployment': [ - {'count': 10, 'name': 'Compute'}, - {'count': 3, 'name': 'Controller'} - ], - 'baremetal_deployed_path': env_path, - 'ssh_public_keys': 'sekrit', - 'ssh_user_name': 'tripleo-admin', - 'ssh_private_key_file': self.tmp_dir.join('id_rsa.pub'), - 'manage_network_ports': True, - 'configure_networking': False, - 'working_dir': self.tmp_dir.join('working_dir'), - 'templates': constants.TRIPLEO_HEAT_TEMPLATES, - }, - inventory='localhost,', - playbook='cli-overcloud-node-provision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=3, - workdir=mock.ANY - ), - mock.call( - extra_vars={ - 'stack_name': 'overcloud', - 'baremetal_deployment': [ - {'count': 10, 'name': 'Compute'}, - {'count': 3, 'name': 'Controller'} - ], - 'prompt': False, - 'manage_network_ports': True, - }, - inventory='localhost,', - playbook='cli-overcloud-node-unprovision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=3, - workdir=mock.ANY - ) - ]) - self.mock_role_playbooks.assert_called_once_with( - self.cmd, - self.cmd.working_dir, - self.cmd.working_dir, - [ - {'count': 10, 'name': 'Compute'}, - {'count': 3, 'name': 'Controller'} - ], - False - ) - - def test__provision_networks(self): - self.cmd.working_dir = self.tmp_dir.join('working_dir') - os.mkdir(self.cmd.working_dir) - networks_file_path = os.path.join( - self.cmd.working_dir, 'tripleo-overcloud-network-data.yaml') - fake_network_data = [{'name': 'Network', 'name_lower': 'network'}] - fake_deployed_env = { - 'parameter_defaults': - {'DeployedNetworkEnvironment': {'foo': 'bar'}}, - 'resource_registry': - {'OS::TripleO::Network': 'foo'} - } - - with open(networks_file_path, 'w') as temp_file: - yaml.safe_dump(fake_network_data, temp_file) - - arglist = ['--networks-file', networks_file_path, - '--templates', constants.TRIPLEO_HEAT_TEMPLATES] - verifylist = [('networks_file', networks_file_path), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES)] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - tht_root = self.tmp_dir.join('tht') - env_dir = os.path.join(tht_root, 'user-environments') - env_path = os.path.join(env_dir, 'networks-deployed.yaml') - os.makedirs(env_dir) - - with open(env_path, 'w') as env_file: - env_file.write(yaml.safe_dump(data=fake_deployed_env)) - - 
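# _provision_networks should hand back the path of the pre-written - # deployed-networks environment and drive the (mocked) provisioning - # playbook; both behaviours are asserted below. - 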
protected_overrides = {'registry_entries': dict(), - 'parameter_entries': dict()} - - result = self.cmd._provision_networks(parsed_args, tht_root, - protected_overrides) - self.assertEqual([env_path], result) - self.mock_playbook.assert_called_once_with( - extra_vars={'network_data_path': networks_file_path, - 'network_deployed_path': env_path, - 'overwrite': True, - 'templates': constants.TRIPLEO_HEAT_TEMPLATES}, - inventory='localhost,', - playbook='cli-overcloud-network-provision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=3, - workdir=mock.ANY) - - def test__provision_virtual_ips(self): - self.cmd.working_dir = self.tmp_dir.join('working_dir') - os.mkdir(self.cmd.working_dir) - networks_file_path = os.path.join( - self.cmd.working_dir, 'tripleo-overcloud-network-data.yaml') - network_data = [ - {'name': 'Network', 'name_lower': 'network', 'subnets': {}} - ] - with open(networks_file_path, 'w') as temp_file: - yaml.safe_dump(network_data, temp_file) - vips_file_path = os.path.join( - self.cmd.working_dir, 'tripleo-overcloud-virtual-ips.yaml') - vip_data = [ - {'network': 'internal_api', 'subnet': 'internal_api_subnet'} - ] - with open(vips_file_path, 'w') as temp_file: - yaml.safe_dump(vip_data, temp_file) - - fake_deployed_env = { - 'parameter_defaults': {'VipPortMap': {'external': {'foo': 'bar'}}}, - 'resource_registry': { - 'OS::TripleO::Network::Ports::ExternalVipPort': 'foo'}} - stack_name = 'overcloud' - arglist = ['--stack', stack_name, - '--vip-file', vips_file_path, - '--networks-file', networks_file_path, - '--templates', constants.TRIPLEO_HEAT_TEMPLATES] - verifylist = [('stack', stack_name), - ('vip_file', vips_file_path), - ('networks_file', networks_file_path), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES)] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - tht_root = self.tmp_dir.join('tht') - env_dir = os.path.join(tht_root, 'user-environments') - env_path = os.path.join(env_dir, 'virtual-ips-deployed.yaml') - os.makedirs(env_dir) - - with open(env_path, 'w') as env_file: - env_file.write(yaml.safe_dump(data=fake_deployed_env)) - - protected_overrides = {'registry_entries': dict(), - 'parameter_entries': dict()} - - result = self.cmd._provision_virtual_ips(parsed_args, tht_root, - protected_overrides) - self.assertEqual([env_path], result) - self.mock_playbook.assert_called_once_with( - extra_vars={'stack_name': stack_name, - 'vip_data_path': vips_file_path, - 'vip_deployed_path': env_path, - 'overwrite': True, - 'templates': constants.TRIPLEO_HEAT_TEMPLATES}, - inventory='localhost,', - playbook='cli-overcloud-network-vip-provision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=3, - workdir=mock.ANY) - - def test_check_limit_warning(self): - mock_warning = mock.MagicMock() - mock_log = mock.MagicMock() - mock_log.warning = mock_warning - env = {'parameter_defaults': {}} - - old_logger = self.cmd.log - self.cmd.log = mock_log - self.cmd._check_limit_skiplist_warning(env) - self.cmd.log = old_logger - mock_warning.assert_not_called() - - def test_check_limit_warning_empty(self): - mock_warning = mock.MagicMock() - mock_log = mock.MagicMock() - mock_log.warning = mock_warning - env = {'parameter_defaults': {'DeploymentServerBlacklist': []}} - - old_logger = self.cmd.log - self.cmd.log = mock_log - self.cmd._check_limit_skiplist_warning(env) - self.cmd.log = old_logger - mock_warning.assert_not_called() - - def test_check_limit_warning_warns(self): - mock_warning = mock.MagicMock() - mock_log = 
mock.MagicMock() - mock_log.warning = mock_warning - env = {'parameter_defaults': {'DeploymentServerBlacklist': ['a']}} - - old_logger = self.cmd.log - self.cmd.log = mock_log - self.cmd._check_limit_skiplist_warning(env) - self.cmd.log = old_logger - expected_message = ('[WARNING] DeploymentServerBlacklist is defined ' - 'and will be ignored because --limit has been ' - 'specified.') - mock_warning.assert_called_once_with(expected_message) - - -class TestArgumentValidation(fakes.TestDeployOvercloud): - - def setUp(self): - super(TestArgumentValidation, self).setUp() - - def is_dir(arg): - if arg == '/tmp/real_dir': - return True - return False - - patcher = mock.patch('os.path.isdir') - mock_isdir = patcher.start() - mock_isdir.side_effect = is_dir - self.addCleanup(patcher.stop) - - app_args = mock.Mock() - app_args.verbose_level = 1 - self.validate = overcloud_deploy._validate_args_environment_dir - - def test_validate_env_dir(self): - self.assertIsNone(self.validate(['/tmp/real_dir'])) - - def test_validate_env_dir_empty(self): - self.assertIsNone(self.validate([])) - - def test_validate_env_dir_not_a_real_directory(self): - self.assertRaises(oscexc.CommandError, - self.validate, - ['/tmp/not_a_real_dir']) - - def test_validate_env_dir_ignore_default_not_existing(self): - full_path = os.path.expanduser(constants.DEFAULT_ENV_DIRECTORY) - self.assertIsNone(self.validate([full_path])) - - -class TestGetDeploymentStatus(utils.TestCommand): - - def setUp(self): - super(TestGetDeploymentStatus, self).setUp() - self.cmd = overcloud_deploy.GetDeploymentStatus(self.app, None) - self.app.client_manager = mock.Mock() - - @mock.patch("tripleoclient.workflows.deployment.get_deployment_status") - def test_get_deployment_status(self, mock_get_deployment_status): - parsed_args = self.check_parser(self.cmd, [], []) - self.cmd.app.stdout = StringIO() - status = 'DEPLOY_SUCCESS' - mock_get_deployment_status.return_value = status - - self.cmd.take_action(parsed_args) - - expected = ( - '+------------+-------------------+\n' - '| Stack Name | Deployment Status |\n' - '+------------+-------------------+\n' - '| overcloud | DEPLOY_SUCCESS |\n' - '+------------+-------------------+\n') - - self.assertEqual(expected, self.cmd.app.stdout.getvalue()) diff --git a/tripleoclient/tests/v1/overcloud_external_update/__init__.py b/tripleoclient/tests/v1/overcloud_external_update/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_external_update/fakes.py b/tripleoclient/tests/v1/overcloud_external_update/fakes.py deleted file mode 100644 index c37da4304..000000000 --- a/tripleoclient/tests/v1/overcloud_external_update/fakes.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - from tripleoclient.tests import fakes - - - class TestOvercloudExternalUpdateRun(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudExternalUpdateRun, self).setUp() diff --git a/tripleoclient/tests/v1/overcloud_external_update/test_overcloud_external_update.py b/tripleoclient/tests/v1/overcloud_external_update/test_overcloud_external_update.py deleted file mode 100644 index d013827f3..000000000 --- a/tripleoclient/tests/v1/overcloud_external_update/test_overcloud_external_update.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import fixtures -import os -from unittest import mock - -from tripleoclient.tests.v1.overcloud_external_update import fakes -from tripleoclient.v1 import overcloud_external_update - - -class TestOvercloudExternalUpdateRun(fakes.TestOvercloudExternalUpdateRun): - - def setUp(self): - super(TestOvercloudExternalUpdateRun, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_external_update.ExternalUpdateRun( - self.app, app_args) - - uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4") - self.mock_uuid4 = uuid4_patcher.start() - # stop the patcher itself on cleanup, not an attribute of the mock - self.addCleanup(uuid4_patcher.stop) - - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_update_with_user_and_tags(self, mock_open, mock_execute, - mock_expanduser): - mock_expanduser.return_value = '/home/fake/' - argslist = ['--ssh-user', 'tripleo-admin', - '--tags', 'ceph'] - verifylist = [ - ('ssh_user', 'tripleo-admin'), - ('tags', 'ceph'), - ] - - self.check_parser(self.cmd, argslist, verifylist) - - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_update_with_user_and_extra_vars(self, mock_open, mock_execute, - mock_expanduser): - mock_expanduser.return_value = '/home/fake/' - argslist = ['--ssh-user', 'tripleo-admin', - '--extra-vars', 'key1=val1', - '--extra-vars', 'key2=val2'] - verifylist = [ - ('ssh_user', 'tripleo-admin'), - ('extra_vars', ['key1=val1', 'key2=val2']) - ] - - self.check_parser(self.cmd, argslist, verifylist) - - @mock.patch('tripleoclient.utils.ensure_run_as_normal_user') - @mock.patch('tripleoclient.workflows.deployment.config_download') - @mock.patch('tripleoclient.utils.get_default_working_dir', autospec=True) - @mock.patch('tripleoclient.workflows.deployment.snapshot_dir', - autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - @mock.patch('tripleoclient.utils.get_key') - def test_update_with_refresh( - self, mock_get_key, - mock_run_ansible_playbook, - mock_snapshot_dir, - mock_get_default_working_dir, - mock_config_download, - mock_usercheck): - argslist = ['--yes', '--refresh'] - verifylist = [ - ('refresh', True) - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - argslist = ['--yes'] - verifylist = [ - ('refresh', 
False) - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - - mock_get_key.return_value = '/test/key' - work_dir = self.useFixture(fixtures.TempDir()) - mock_get_default_working_dir.return_value = work_dir.path - ansible_dir = os.path.join(work_dir.path, 'config-download', - 'overcloud') - self.cmd.take_action(parsed_args) - mock_get_key.assert_called_once_with('overcloud') - mock_snapshot_dir.assert_called_once_with(ansible_dir) - mock_run_ansible_playbook.assert_called() - mock_config_download.assert_not_called() diff --git a/tripleoclient/tests/v1/overcloud_external_upgrade/__init__.py b/tripleoclient/tests/v1/overcloud_external_upgrade/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_external_upgrade/fakes.py b/tripleoclient/tests/v1/overcloud_external_upgrade/fakes.py deleted file mode 100644 index 9aaf9f534..000000000 --- a/tripleoclient/tests/v1/overcloud_external_upgrade/fakes.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from tripleoclient.tests import fakes - - -class TestOvercloudExternalUpgradeRun(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudExternalUpgradeRun, self).setUp() diff --git a/tripleoclient/tests/v1/overcloud_external_upgrade/test_overcloud_external_upgrade.py b/tripleoclient/tests/v1/overcloud_external_upgrade/test_overcloud_external_upgrade.py deleted file mode 100644 index 8cb6a5508..000000000 --- a/tripleoclient/tests/v1/overcloud_external_upgrade/test_overcloud_external_upgrade.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from unittest import mock - -from tripleoclient.tests.v1.overcloud_external_upgrade import fakes -from tripleoclient.v1 import overcloud_external_upgrade - - -class TestOvercloudExternalUpgradeRun(fakes.TestOvercloudExternalUpgradeRun): - - def setUp(self): - super(TestOvercloudExternalUpgradeRun, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_external_upgrade.ExternalUpgradeRun( - self.app, app_args) - - uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4") - self.mock_uuid4 = uuid4_patcher.start() - self.addCleanup(uuid4_patcher.stop) - - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_upgrade_with_user_and_tags(self, mock_open, mock_execute, - mock_expanduser): - mock_expanduser.return_value = '/home/fake/' - argslist = ['--ssh-user', 'tripleo-admin', - '--tags', 'ceph'] - verifylist = [ - ('ssh_user', 'tripleo-admin'), - ('tags', 'ceph'), - ] - - self.check_parser(self.cmd, argslist, verifylist) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_upgrade_with_user_and_extra_vars(self, mock_open, mock_execute, - mock_expanduser, update_ansible): - mock_expanduser.return_value = '/home/fake/' - argslist = ['--ssh-user', 'tripleo-admin', - '--extra-vars', 'key1=val1', - '--extra-vars', 'key2=val2'] - verifylist = [ - ('ssh_user', 'tripleo-admin'), - ('extra_vars', ['key1=val1', 'key2=val2']) - ] - - self.check_parser(self.cmd, argslist, verifylist) diff --git a/tripleoclient/tests/v1/overcloud_image/__init__.py b/tripleoclient/tests/v1/overcloud_image/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_image/test_overcloud_image.py b/tripleoclient/tests/v1/overcloud_image/test_overcloud_image.py deleted file mode 100644 index c43099eb0..000000000 --- a/tripleoclient/tests/v1/overcloud_image/test_overcloud_image.py +++ /dev/null @@ -1,1411 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from datetime import datetime -import os -from unittest import mock - -from osc_lib import exceptions -import tripleo_common.arch -from tripleoclient.tests import base -from tripleoclient.tests.fakes import FakeHandle -from tripleoclient.tests.v1.test_plugin import TestPluginV1 -from tripleoclient import utils as plugin_utils -from tripleoclient.v1 import overcloud_image - - -class TestOvercloudImageBuild(TestPluginV1): - - def setUp(self): - super(TestOvercloudImageBuild, self).setUp() - - run_cmd = mock.patch('tripleoclient.utils.run_command') - self.mock_run_command = run_cmd.start() - self.addCleanup(run_cmd.stop) - # Get the command object to test - self.cmd = overcloud_image.BuildOvercloudImage(self.app, None) - - @mock.patch('tripleo_common.image.build.ImageBuildManager', autospec=True) - def test_overcloud_image_build_default_yaml(self, mock_manager): - arglist = [] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - mock_manager.assert_called_once_with( - ['/usr/share/openstack-tripleo-common/image-yaml/' - 'overcloud-images-python3.yaml', - '/usr/share/openstack-tripleo-common/image-yaml/' - 'overcloud-images-centos9.yaml'], - output_directory='.', - skip=True, - images=None) - cmd = ['sudo', 'dnf', 'install', '-y'] + self.cmd.REQUIRED_PACKAGES - self.mock_run_command.assert_called_once_with( - cmd, name="Install required packages") - - @mock.patch('tripleo_common.image.build.ImageBuildManager', autospec=True) - def test_overcloud_image_build_yaml(self, mock_manager): - arglist = ['--config-file', 'config.yaml'] - verifylist = [('config_files', ['config.yaml'])] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - mock_manager.assert_called_once_with( - ['config.yaml'], - output_directory='.', - skip=True, - images=None) - - @mock.patch('tripleo_common.image.build.ImageBuildManager', autospec=True) - def test_overcloud_image_build_multi_yaml(self, mock_manager): - arglist = ['--config-file', 'config1.yaml', - '--config-file', 'config2.yaml'] - verifylist = [('config_files', ['config1.yaml', 'config2.yaml'])] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - mock_manager.assert_called_once_with( - ['config1.yaml', 'config2.yaml'], - output_directory='.', - skip=True, - images=None) - - @mock.patch('tripleo_common.image.build.ImageBuildManager', autospec=True) - def test_overcloud_image_build_with_no_skip(self, mock_manager): - arglist = ['--config-file', 'config.yaml', '--no-skip'] - verifylist = [('config_files', ['config.yaml']), - ('skip', False)] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - mock_manager.assert_called_once_with( - ['config.yaml'], - output_directory='.', - skip=False, - images=None) - - @mock.patch('tripleo_common.image.build.ImageBuildManager', autospec=True) - def test_overcloud_image_build_with_output_directory(self, mock_manager): - arglist = ['--config-file', 'config.yaml', - '--output-directory', '/tmp/abc'] - verifylist = [('config_files', ['config.yaml']), - ('output_directory', '/tmp/abc')] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - mock_manager.assert_called_once_with( - ['config.yaml'], - output_directory='/tmp/abc', - skip=True, - images=None) - - -class TestBaseClientAdapter(base.TestCommand): - - def setUp(self): - 
super(TestBaseClientAdapter, self).setUp() - self.adapter = overcloud_image.BaseClientAdapter('/foo') - - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._copy_file', autospec=True) - def test_file_try_update_need_update(self, - mock_copy_file, - mock_files_changed, - mock_isfile): - mock_isfile.return_value = True - mock_files_changed.return_value = True - - self.adapter.file_create_or_update('discimg', 'discimgprod') - mock_copy_file.assert_not_called() - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._copy_file', autospec=True) - def test_file_try_update_do_update(self, - mock_copy_file, - mock_files_changed): - mock_files_changed.return_value = True - - self.adapter.update_existing = True - self.adapter.file_create_or_update('discimg', 'discimgprod') - mock_copy_file.assert_called_once_with( - self.adapter, 'discimg', 'discimgprod') - - @mock.patch('subprocess.check_call', autospec=True) - def test_copy_file(self, mock_subprocess_call): - self.adapter._copy_file('/foo.qcow2', 'bar.qcow2') - mock_subprocess_call.assert_has_calls( - [ - mock.call('sudo mkdir -m 0775 -p ""', shell=True), - mock.call('sudo cp -f "/foo.qcow2" "bar.qcow2"', shell=True) - ] - ) - - @mock.patch('subprocess.check_call', autospec=True) - def test_move_file(self, mock_subprocess_call): - self.adapter._move_file('/foo.qcow2', 'bar.qcow2') - mock_subprocess_call.assert_called_once_with( - 'sudo mv "/foo.qcow2" "bar.qcow2"', shell=True) - - @mock.patch('subprocess.check_call', autospec=True) - def test_make_dirs(self, mock_subprocess_call): - self.adapter._make_dirs('/foo/bar/baz') - mock_subprocess_call.assert_called_once_with( - 'sudo mkdir -m 0775 -p "/foo/bar/baz"', shell=True) - - -class TestFileImageClientAdapter(TestPluginV1): - - def setUp(self): - super(TestFileImageClientAdapter, self).setUp() - self.updated = [] - self.adapter = overcloud_image.FileImageClientAdapter( - image_path='/home/foo', - local_path='/my/images', - updated=self.updated - ) - self.image = mock.Mock() - self.image.id = 'file:///my/images/x86_64/overcloud-full.qcow2' - self.image.name = 'overcloud-full' - self.image.checksum = 'asdf' - self.image.created_at = '2019-11-14T01:33:39' - self.image.size = 982802432 - - @mock.patch('os.path.exists') - def test_get_image_property(self, mock_exists): - mock_exists.side_effect = [ - True, True, False, False - ] - image = mock.Mock() - image.id = 'file:///my/images/x86_64/overcloud-full.qcow2' - # file exists - self.assertEqual( - 'file:///my/images/x86_64/overcloud-full.vmlinuz', - self.adapter.get_image_property(image, 'kernel_id') - ) - self.assertEqual( - 'file:///my/images/x86_64/overcloud-full.initrd', - self.adapter.get_image_property(image, 'ramdisk_id') - ) - # file doesn't exist - self.assertIsNone( - self.adapter.get_image_property(image, 'kernel_id') - ) - self.assertIsNone( - self.adapter.get_image_property(image, 'ramdisk_id') - ) - self.assertRaises(ValueError, self.adapter.get_image_property, - image, 'foo') - - def test_paths(self): - self.assertEqual( - ('/my/images/x86_64', - 'overcloud-full.vmlinuz'), - self.adapter._paths( - 'overcloud-full', - plugin_utils.overcloud_kernel, - 'x86_64', - None - ) - ) - self.assertEqual( - ('/my/images', - 'overcloud-full.raw'), - self.adapter._paths( 
'overcloud-full', - plugin_utils.overcloud_image, - None, - None - ) - ) - self.assertEqual( - ('/my/images/power9-ppc64le', - 'overcloud-full.initrd'), - self.adapter._paths( - 'overcloud-full', - plugin_utils.overcloud_ramdisk, - 'ppc64le', - 'power9' - ) - ) - - @mock.patch('os.path.exists') - @mock.patch('os.stat') - @mock.patch('tripleoclient.utils.file_checksum') - def test_get_image(self, mock_checksum, mock_stat, mock_exists): - st_mtime = 1573695219 - mock_exists.return_value = True - mock_stat.return_value.st_size = 982802432 - mock_stat.return_value.st_mtime = st_mtime - mock_checksum.return_value = 'asdf' - - image = self.adapter._get_image( - '/my/images/x86_64/overcloud-full.qcow2') - self.assertEqual( - 'file:///my/images/x86_64/overcloud-full.qcow2', - image.id - ) - self.assertEqual('overcloud-full', image.name) - self.assertEqual('asdf', image.checksum) - self.assertEqual(datetime.fromtimestamp(st_mtime).strftime("%M:%S"), - datetime.strptime( - image.created_at, "%Y-%m-%dT%H:%M:%S").strftime("%M:%S")) - self.assertEqual(982802432, image.size) - - @mock.patch('tripleoclient.utils.file_checksum') - @mock.patch('tripleoclient.v1.overcloud_image.' - 'FileImageClientAdapter._get_image', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._move_file', autospec=True) - def test_image_try_update(self, mock_move, mock_get_image, mock_checksum): - - # existing image with identical checksum - mock_checksum.return_value = 'asdf' - mock_get_image.return_value = self.image - self.assertEqual( - self.image, - self.adapter._image_try_update( - '/home/foo/overcloud-full.qcow2', - '/my/images/x86_64/overcloud-full.qcow2' - ) - ) - self.assertEqual([], self.updated) - - # no image to update - mock_get_image.return_value = None - self.assertIsNone( - self.adapter._image_try_update( - '/home/foo/overcloud-full.qcow2', - '/my/images/x86_64/overcloud-full.qcow2' - ) - ) - self.assertEqual([], self.updated) - - # existing image with different checksum, but update_existing=False - mock_checksum.return_value = 'fdsa' - mock_get_image.return_value = self.image - self.assertEqual( - self.image, - self.adapter._image_try_update( - '/home/foo/overcloud-full.qcow2', - '/my/images/x86_64/overcloud-full.qcow2' - ) - ) - self.assertEqual([], self.updated) - - # existing image with different checksum, update_existing=True - self.adapter.update_existing = True - self.assertIsNone( - self.adapter._image_try_update( - '/home/foo/overcloud-full.qcow2', - '/my/images/x86_64/overcloud-full.qcow2' - ) - ) - self.assertEqual( - ['/my/images/x86_64/overcloud-full.qcow2'], - self.updated - ) - mock_move.assert_called_once_with( - self.adapter, - '/my/images/x86_64/overcloud-full.qcow2', - '/my/images/x86_64/overcloud-full_20191114T013339.qcow2' - ) - - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'FileImageClientAdapter._get_image', autospec=True) - @mock.patch('os.path.isdir') - def test_upload_image(self, mock_isdir, mock_get_image, - mock_subprocess_call): - mock_isdir.return_value = False - - mock_get_image.return_value = self.image - - result = self.adapter._upload_image( - '/home/foo/overcloud-full.qcow2', - '/my/images/x86_64/overcloud-full.qcow2' - ) - self.assertEqual(self.image, result) - mock_subprocess_call.assert_has_calls([ - mock.call('sudo mkdir -m 0775 -p "/my/images/x86_64"', shell=True), - mock.call('sudo cp -f "/home/foo/overcloud-full.qcow2" ' - '"/my/images/x86_64/overcloud-full.qcow2"', shell=True) - ]) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'FileImageClientAdapter._upload_image', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'FileImageClientAdapter._image_try_update', autospec=True) - def test_update_or_upload(self, mock_image_try_update, mock_upload_image): - - # image exists - mock_image_try_update.return_value = self.image - self.assertEqual( - self.image, - self.adapter.update_or_upload( - image_name='overcloud-full', - properties={}, - names_func=plugin_utils.overcloud_image, - arch='x86_64' - ) - ) - mock_upload_image.assert_not_called() - - # image needs uploading - mock_image_try_update.return_value = None - mock_upload_image.return_value = self.image - self.assertEqual( - self.image, - self.adapter.update_or_upload( - image_name='overcloud-full', - properties={}, - names_func=plugin_utils.overcloud_image, - arch='x86_64' - ) - ) - mock_upload_image.assert_called_once_with( - self.adapter, - '/home/foo/overcloud-full.raw', - '/my/images/x86_64/overcloud-full.raw' - ) - mock_image_try_update.assert_has_calls([ - mock.call(self.adapter, - '/home/foo/overcloud-full.raw', - '/my/images/x86_64/overcloud-full.raw'), - mock.call(self.adapter, - '/home/foo/overcloud-full.raw', - '/my/images/x86_64/overcloud-full.raw') - ]) - - -class TestGlanceClientAdapter(TestPluginV1): - - def setUp(self): - super(TestGlanceClientAdapter, self).setUp() - self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.version = 2.0 - self._arch = tripleo_common.arch.kernel_arch() - self.app.client_manager.image.images.create.return_value = ( - mock.Mock(id=10, name='imgname', - properties={'kernel_id': 10, 'ramdisk_id': 10, - 'hw_architecture': self._arch}, - created_at='2015-07-31T14:37:22.000000')) - self.updated = [] - self.adapter = overcloud_image.GlanceClientAdapter( - client=self.app.client_manager.image, - image_path='/foo', - updated=self.updated - ) - - def test_get_image_exists(self): - image_mock = mock.Mock(name='imagename') - self.app.client_manager.image.find_image.return_value = image_mock - self.assertEqual(self.adapter._get_image('imagename'), image_mock) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_image_try_update_no_exist(self, mock_get_image): - mock_get_image.return_value = None - self.assertFalse(self.adapter._image_try_update( - 'name', 'fn')) - self.assertEqual([], self.updated) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'GlanceClientAdapter._get_image', autospec=True) - def test_image_try_update_need_update(self, - mock_get_image, - mock_image_changed): - image_mock = mock.Mock(name='imagename') - mock_get_image.return_value = image_mock - mock_image_changed.return_value = True - self.assertEqual( - self.adapter._image_try_update('name', 'fn'), - image_mock - ) - self.assertEqual([], self.updated) - self.app.client_manager.image.update_image.assert_not_called() - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_image_try_update_do_update(self, - mock_get_image, - mock_image_changed): - image_mock = mock.Mock(name='imagename', - created_at='2015-07-31T14:37:22.000000') - update_mock = mock.Mock(return_value=image_mock) - self.app.client_manager.image.update_image = update_mock - mock_get_image.return_value = image_mock - mock_image_changed.return_value = True - self.adapter.update_existing = True - self.assertEqual( - self.adapter._image_try_update('name', 'fn'), - None - ) - self.assertEqual([image_mock.id], self.updated) - update_mock.assert_called_once() - - -class TestUploadOvercloudImage(TestPluginV1): - - def setUp(self): - super(TestUploadOvercloudImage, self).setUp() - - # Get the command object to test - self.cmd = overcloud_image.UploadOvercloudImage(self.app, None) - self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.version = 2.0 - self._arch = tripleo_common.arch.kernel_arch() - self.app.client_manager.image.create_image.return_value = ( - mock.Mock(id=10, name='imgname', - properties={'kernel_id': 10, 'ramdisk_id': 10, - 'hw_architecture': self._arch}, - created_at='2015-07-31T14:37:22.000000')) - mock_cfe = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.check_file_exists', - autospec=True) - mock_cfe.start() - self.addCleanup(mock_cfe.stop) - mock_cfe.return_value = True - - mock_rifp = mock.patch('tripleoclient.v1.overcloud_image.' 
- 'BaseClientAdapter.read_image_file_pointer', - autospec=True) - mock_rifp.start() - self.addCleanup(mock_rifp.stop) - self._file_handle = FakeHandle() - mock_rifp.return_value = self._file_handle - - @mock.patch.dict(os.environ, {'KEY': 'VALUE', 'OLD_KEY': 'OLD_VALUE'}) - def test_get_environment_var(self): - self.assertEqual('default-value', - self.cmd._get_environment_var('MISSING', - 'default-value')) - self.assertEqual('VALUE', - self.cmd._get_environment_var('KEY', - 'default-value')) - self.assertEqual('VALUE', - self.cmd._get_environment_var('KEY', - 'default-value', - deprecated=['MISSING'])) - self.assertEqual('OLD_VALUE', - self.cmd._get_environment_var('KEY', - 'default-value', - deprecated=['OLD_KEY'])) - - @mock.patch('os.path.exists') - def test_get_image_filename(self, mock_exists): - mock_exists.return_value = False - - parsed_args = self.check_parser(self.cmd, [], []) - self.assertEqual('overcloud-full.qcow2', - self.cmd._get_image_filename(parsed_args)) - - mock_exists.return_value = True - self.assertEqual('overcloud-hardened-uefi-full.qcow2', - self.cmd._get_image_filename(parsed_args)) - - parsed_args = self.check_parser( - self.cmd, ['--os-image-name', 'overcloud-custom.qcow2'], []) - self.assertEqual('overcloud-custom.qcow2', - self.cmd._get_image_filename(parsed_args)) - - def test_platform_without_architecture_fail(self): - parsed_args = self.check_parser(self.cmd, ['--platform', 'SNB'], []) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_images_v2(self, - mock_get_image, - mock_subprocess_call, - mock_isfile, - mock_convert_image): - parsed_args = self.check_parser(self.cmd, ['--no-local'], []) - mock_isfile.return_value = False - - mock_get_image.return_value = None - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.images.delete.call_count - ) - self.assertEqual( - 3, - self.app.client_manager.image.create_image.call_count - ) - self.app.client_manager.image.create_image.assert_has_calls([ - mock.call(name='overcloud-full-vmlinuz', - disk_format='aki', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - mock.call(name='overcloud-full-initrd', - disk_format='ari', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - mock.call(name='overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - ]) - - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 4) - mock_subprocess_call.assert_has_calls([ - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel" ' - '"/var/lib/ironic/httpboot/agent.kernel"', shell=True), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs" ' - '"/var/lib/ironic/httpboot/agent.ramdisk"', shell=True) - ]) - - @mock.patch('os.path.isfile') - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'GlanceClientAdapter._get_image', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_try_update', autospec=True) - def test_overcloud_create_images_image_path(self, - mock_image_try_update, - mock_get_image, - mock_subprocess_call, - mock_isfile): - parsed_args = self.check_parser(self.cmd, - ['--image-path', '/foo', - '--no-local'], - []) - mock_get_image.return_value = None - mock_image_try_update.return_value = None - mock_isfile.return_value = False - - self.cmd.take_action(parsed_args) - - self.cmd.adapter._image_try_update.assert_has_calls([ - mock.call(self.cmd.adapter, - 'overcloud-full-vmlinuz', - '/foo/overcloud-full.vmlinuz'), - mock.call(self.cmd.adapter, - 'overcloud-full-initrd', - '/foo/overcloud-full.initrd'), - mock.call(self.cmd.adapter, - 'overcloud-full', - '/foo/overcloud-full.raw'), - ]) - mock_subprocess_call.assert_has_calls([ - mock.call( - 'sudo qemu-img convert -O raw "/foo/overcloud-full.qcow2"' - ' "/foo/overcloud-full.raw"', - shell=True - ), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "/foo/ironic-python-agent.kernel"' - ' "/var/lib/ironic/httpboot/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "/foo/ironic-python-agent.initramfs"' - ' "/var/lib/ironic/httpboot/agent.ramdisk"', - shell=True) - ]) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_create_noupdate_images(self, - mock_files_changed, - mock_image_changed, - mock_get_image, - mock_subprocess_call, - mock_isfile, - mock_convert_image): - parsed_args = self.check_parser(self.cmd, ['--no-local'], []) - mock_isfile.return_value = True - mock_files_changed.return_value = True - - existing_image = mock.Mock(id=10, name='imgname', - properties={'kernel_id': 10, - 'ramdisk_id': 10}) - mock_get_image.return_value = existing_image - mock_image_changed.return_value = True - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 0, - self.app.client_manager.image.create_image.call_count - ) - self.assertEqual( - 0, - self.app.client_manager.image.image_update.call_count - ) - - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 0) - self.assertFalse(self.cmd.updated) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_create_update_images(self, - mock_files_changed, - mock_image_changed, - mock_get_image, - mock_subprocess_call, - mock_convert_image): - parsed_args = self.check_parser(self.cmd, ['--update-existing', - '--no-local'], []) - mock_files_changed.return_value = True - - existing_image = mock.Mock(id=10, name='imgname', - properties={'kernel_id': 10, - 'ramdisk_id': 10}, - created_at='2015-07-31T14:37:22.000000') - mock_get_image.return_value = existing_image - mock_image_changed.return_value = True - self.app.client_manager.image.image_update.return_value = ( - existing_image) - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 3, - self.app.client_manager.image.create_image.call_count - ) - self.assertEqual( - 6, # 3 for new uploads, 3 updating the existing ones - self.app.client_manager.image.update_image.call_count - ) - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 4) - self.assertTrue(self.cmd.updated) - - -class TestUploadOvercloudImageFull(TestPluginV1): - - def setUp(self): - super(TestUploadOvercloudImageFull, self).setUp() - - # Get the command object to test - self.cmd = overcloud_image.UploadOvercloudImage(self.app, None) - self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.version = 2.0 - self._arch = tripleo_common.arch.kernel_arch() - self.app.client_manager.image.create_image.return_value = ( - mock.Mock(id=10, name='imgname', - properties={'hw_architecture': self._arch}, - created_at='2015-07-31T14:37:22.000000')) - mock_cfe = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.check_file_exists', - autospec=True) - mock_cfe.start() - self.addCleanup(mock_cfe.stop) - mock_cfe.return_value = True - - mock_rifp = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.read_image_file_pointer', - autospec=True) - mock_rifp.start() - self.addCleanup(mock_rifp.stop) - self._file_handle = FakeHandle() - mock_rifp.return_value = self._file_handle - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_images(self, - mock_get_image, - mock_subprocess_call, - mock_isfile, mock_convert_image): - parsed_args = self.check_parser(self.cmd, ['--whole-disk', - '--no-local'], []) - mock_isfile.return_value = False - - mock_get_image.return_value = None - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 1, - self.app.client_manager.image.create_image.call_count - ) - - self.app.client_manager.image.create_image.assert_has_calls([ - mock.call(name='overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public',), - ]) - # properties are set by updating the image - self.app.client_manager.image.update_image.assert_has_calls([ - mock.call(mock.ANY, hw_architecture=self._arch), - ]) - - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 4) - mock_subprocess_call.assert_has_calls([ - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/var/lib/ironic/httpboot/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/var/lib/ironic/httpboot/agent.ramdisk"', - shell=True) - ]) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_images_with_arch(self, - mock_get_image, - mock_subprocess_call, - mock_isfile, - mock_convert_image): - parsed_args = self.check_parser(self.cmd, - ['--whole-disk', '--arch', 'ppc64le', - '--no-local'], - []) - mock_isfile.return_value = False - - mock_get_image.return_value = None - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 1, - self.app.client_manager.image.create_image.call_count - ) - - self.app.client_manager.image.create_image.assert_has_calls([ - mock.call(name='ppc64le-overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - ]) - - self.app.client_manager.image.update_image.assert_has_calls([ - mock.call(mock.ANY, hw_architecture='ppc64le'), - ]) - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 4) - mock_subprocess_call.assert_has_calls([ - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/var/lib/ironic/httpboot/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/var/lib/ironic/httpboot/agent.ramdisk"', - shell=True) - ]) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_noupdate_images(self, mock_get_image, - mock_files_changed, - mock_image_changed, - mock_subprocess_call, - mock_isfile, - mock_convert_image): - parsed_args = self.check_parser(self.cmd, ['--whole-disk', - '--no-local'], []) - mock_isfile.return_value = True - mock_files_changed.return_value = True - - existing_image = mock.Mock(id=10, name='imgname', - properties={'hw_architecture': self._arch}) - mock_get_image.return_value = existing_image - self.cmd._image_changed = mock.Mock(return_value=True) - mock_image_changed.return_value = True - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 0, - self.app.client_manager.image.create_image.call_count - ) - self.assertEqual( - 0, - self.app.client_manager.image.update_image.call_count - ) - - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 0) - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_update_images(self, mock_get_image, - mock_files_changed, - mock_image_changed, - mock_subprocess_call, - mock_convert_image): - parsed_args = self.check_parser( - self.cmd, ['--update-existing', '--whole-disk', - '--no-local'], []) - mock_files_changed.return_value = True - - existing_image = mock.Mock(id=10, name='imgname', - properties={'hw_architecture': self._arch}, - created_at='2015-07-31T14:37:22.000000') - mock_get_image.return_value = existing_image - mock_image_changed.return_value = True - self.app.client_manager.image.update_image.return_value = ( - existing_image) - - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 1, - self.app.client_manager.image.create_image.call_count - ) - self.assertEqual( - 2, # update 1 image *and* add properties to 1 image - self.app.client_manager.image.update_image.call_count - ) - self.assertEqual(mock_convert_image.call_count, 1) - self.assertEqual(mock_subprocess_call.call_count, 4) - - -class TestUploadOvercloudImageFullMultiArch(TestPluginV1): - # NOTE(tonyb): Really only the id is important below, but the names make - # reading logfiles a little nicer - images = [ - mock.Mock(id=10, name='overcloud-full'), - mock.Mock(id=11, name='ppc64le-overcloud-full'), - mock.Mock(id=12, name='p9-ppc64le-overcloud-full'), - ] - - def setUp(self): - super(TestUploadOvercloudImageFullMultiArch, self).setUp() - - # Get the command object to test - self.cmd = overcloud_image.UploadOvercloudImage(self.app, None) - self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.version = 2.0 - # NOTE(tonyb): This is a little fragile. 
It works because - # GlanceClientAdapter._upload_image() calls - # self.client.images.create() and self.client.images.get() once each - # call so this way we always create() and get() the same mocked "image" - self.app.client_manager.image.create_image.side_effect = self.images - self.app.client_manager.image.get_image.side_effect = self.images - - mock_cfe = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.check_file_exists', - autospec=True) - mock_cfe.start() - self.addCleanup(mock_cfe.stop) - mock_cfe.return_value = True - - mock_rifp = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.read_image_file_pointer', - autospec=True) - mock_rifp.start() - self.addCleanup(mock_rifp.stop) - self._file_handle = FakeHandle() - mock_rifp.return_value = self._file_handle - - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('tripleo_common.arch.kernel_arch', autospec=True, - return_value='x86_64') - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_images_with_arch(self, mock_get_image, - mock_subprocess_call, - mock_isfile, mock_arch, - mock_convert_image): - mock_isfile.return_value = False - mock_get_image.return_value = None - - parsed_args = self.check_parser(self.cmd, - ['--whole-disk', '--no-local'], - []) - self.cmd.take_action(parsed_args) - parsed_args = self.check_parser(self.cmd, - ['--whole-disk', - '--http-boot', '/httpboot/ppc64le', - '--arch', 'ppc64le', '--no-local'], - []) - self.cmd.take_action(parsed_args) - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 2, - self.app.client_manager.image.create_image.call_count - ) - - self.app.client_manager.image.create_image.assert_has_calls([ - mock.call(name='overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - mock.call(name='ppc64le-overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - ]) - - self.app.client_manager.image.update_image.assert_has_calls([ - mock.call(10, hw_architecture='x86_64'), - mock.call(11, hw_architecture='ppc64le'), - ]) - self.assertEqual(mock_convert_image.call_count, 2) - self.assertEqual(mock_subprocess_call.call_count, 8) - mock_subprocess_call.assert_has_calls([ - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/var/lib/ironic/httpboot/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/var/lib/ironic/httpboot/agent.ramdisk"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/httpboot/ppc64le"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/httpboot/ppc64le/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/httpboot/ppc64le"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/httpboot/ppc64le/agent.ramdisk"', - shell=True) - ]) - - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'BaseClientAdapter._convert_image', autospec=True) - @mock.patch('tripleo_common.arch.kernel_arch', autospec=True, - return_value='x86_64') - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._get_image', autospec=True) - def test_overcloud_create_images_with_arch_and_pltform(self, - mock_get_image, - mock_subprocess, - mock_isfile, - mock_arch, - mock_convert_image): - mock_isfile.return_value = False - mock_get_image.return_value = None - - parsed_args = self.check_parser(self.cmd, - ['--whole-disk', '--no-local'], - []) - self.cmd.take_action(parsed_args) - parsed_args = self.check_parser(self.cmd, - ['--whole-disk', - '--http-boot', '/httpboot/ppc64le', - '--architecture', 'ppc64le', - '--no-local'], - []) - self.cmd.take_action(parsed_args) - parsed_args = self.check_parser(self.cmd, - ['--whole-disk', - '--http-boot', '/httpboot/p9-ppc64le', - '--architecture', 'ppc64le', - '--platform', 'p9', - '--no-local'], - []) - self.cmd.take_action(parsed_args) - - self.assertEqual( - 0, - self.app.client_manager.image.delete_image.call_count - ) - self.assertEqual( - 3, - self.app.client_manager.image.create_image.call_count - ) - - self.app.client_manager.image.create_image.assert_has_calls([ - mock.call(name='overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - mock.call(name='ppc64le-overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - mock.call(name='p9-ppc64le-overcloud-full', - disk_format='raw', - container_format='bare', - data=mock.ANY, - validate_checksum=False, - visibility='public'), - ]) - - self.app.client_manager.image.update_image.assert_has_calls([ - mock.call(10, hw_architecture='x86_64'), - mock.call(11, hw_architecture='ppc64le'), - mock.call(12, hw_architecture='ppc64le', tripleo_platform='p9'), - ]) - self.assertEqual(mock_convert_image.call_count, 3) - self.assertEqual(mock_subprocess.call_count, 12) - mock_subprocess.assert_has_calls([ - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/var/lib/ironic/httpboot/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/var/lib/ironic/httpboot"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/var/lib/ironic/httpboot/agent.ramdisk"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/httpboot/ppc64le"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/httpboot/ppc64le/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/httpboot/ppc64le"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/httpboot/ppc64le/agent.ramdisk"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/httpboot/p9-ppc64le"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.kernel"' - ' "/httpboot/p9-ppc64le/agent.kernel"', - shell=True), - mock.call('sudo mkdir -m 0775 -p "/httpboot/p9-ppc64le"', - shell=True), - mock.call('sudo cp -f "./ironic-python-agent.initramfs"' - ' "/httpboot/p9-ppc64le/agent.ramdisk"', - shell=True) - ]) - - -class TestUploadOnlyExisting(TestPluginV1): - - def setUp(self): - super(TestUploadOnlyExisting, self).setUp() - - # Get the command object to test - self.cmd = overcloud_image.UploadOvercloudImage(self.app, None) - 
self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.version = 2.0 - self.app.client_manager.image.create_image.return_value = ( - mock.Mock(id=10, name='imgname', properties={}, - created_at='2015-07-31T14:37:22.000000')) - mock_cfe = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.check_file_exists', - autospec=True) - mock_cfe.start() - self.addCleanup(mock_cfe.stop) - mock_cfe.return_value = True - - mock_rifp = mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter.read_image_file_pointer', - autospec=True) - mock_rifp.start() - self.addCleanup(mock_rifp.stop) - self._file_handle = FakeHandle() - mock_rifp.return_value = self._file_handle - - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_try_update', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_upload_just_ipa_wholedisk(self, - mock_files_changed, - mock_image_changed, - mock_image_try_update, - mock_isfile_call, - mock_subprocess_call): - mock_image_changed.return_value = True - mock_image_try_update.return_value = None - - parsed_args = self.check_parser( - self.cmd, ['--whole-disk', '--image-type=ironic-python-agent', - '--no-local'], []) - - mock_files_changed.return_value = True - self.cmd.take_action(parsed_args) - - # ensure check_file_exists has been called just with the ipa files - self.assertCountEqual( - self.cmd.adapter.check_file_exists.call_args_list, - [mock.call(self.cmd.adapter, './ironic-python-agent.initramfs'), - mock.call(self.cmd.adapter, './ironic-python-agent.kernel')]) - - self.assertFalse(mock_image_try_update.called) - - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_try_update', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_upload_just_os_wholedisk(self, - mock_files_changed, - mock_image_changed, - mock_image_try_update, - mock_isfile_call, - mock_subprocess_call): - mock_image_changed.return_value = True - mock_image_try_update.return_value = None - - parsed_args = self.check_parser( - self.cmd, ['--whole-disk', '--image-type=os', - '--no-local'], []) - - mock_files_changed.return_value = True - self.cmd.take_action(parsed_args) - - # ensure check_file_exists has been called just with the os image - self.cmd.adapter.check_file_exists.assert_called_once_with( - self.cmd.adapter, './overcloud-full.qcow2') - - # ensure try_update has been called just with the os image - mock_image_try_update.assert_called_once_with( - self.cmd.adapter, - 'overcloud-full', - './overcloud-full.raw' - ) - - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_try_update', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_upload_os_wholedisk_default(self, - mock_files_changed, - mock_image_changed, - mock_image_try_update, - mock_isfile_call, - mock_subprocess_call): - mock_image_changed.return_value = True - mock_image_try_update.return_value = None - - parsed_args = self.check_parser( - self.cmd, ['--image-type=os', - '--no-local', - '--os-image-name', - 'overcloud-hardened-uefi-full.qcow2'], []) - - mock_files_changed.return_value = True - self.cmd.take_action(parsed_args) - - # ensure check_file_exists has been called just with the os image - self.cmd.adapter.check_file_exists.assert_called_once_with( - self.cmd.adapter, './overcloud-hardened-uefi-full.qcow2') - - # ensure try_update has been called just with the os image - mock_image_try_update.assert_called_once_with( - self.cmd.adapter, - 'overcloud-hardened-uefi-full', - './overcloud-hardened-uefi-full.raw' - ) - - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_try_update', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_upload_just_ipa(self, - mock_files_changed, - mock_image_changed, - mock_image_try_update, - mock_isfile_call, - mock_subprocess_call): - mock_image_changed.return_value = True - mock_image_try_update.return_value = None - - parsed_args = self.check_parser( - self.cmd, ['--image-type=ironic-python-agent', - '--no-local'], []) - - mock_files_changed.return_value = True - self.cmd.take_action(parsed_args) - - # ensure check_file_exists has been called just with ipa - self.assertCountEqual( - self.cmd.adapter.check_file_exists.call_args_list, - [mock.call(self.cmd.adapter, './ironic-python-agent.initramfs'), - mock.call(self.cmd.adapter, './ironic-python-agent.kernel')] - ) - - self.assertFalse(mock_image_try_update.called) - - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_try_update', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' - 'GlanceClientAdapter._image_changed', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_image.' 
- 'BaseClientAdapter._files_changed', autospec=True) - def test_overcloud_upload_just_os(self, - mock_files_changed, - mock_image_changed, - mock_image_try_update, - mock_isfile_call, - mock_subprocess_call): - mock_image_changed.return_value = True - mock_image_try_update.return_value = None - - parsed_args = self.check_parser( - self.cmd, ['--image-type=os', - '--no-local'], []) - - mock_files_changed.return_value = True - self.cmd.take_action(parsed_args) - - # ensure check_file_exists has been called just with os - self.assertCountEqual( - self.cmd.adapter.check_file_exists.call_args_list, - [mock.call(self.cmd.adapter, './overcloud-full.qcow2')]) - - # ensure try_update has been called just with the os images - mock_image_try_update.assert_has_calls([ - mock.call(self.cmd.adapter, - 'overcloud-full-vmlinuz', - './overcloud-full.vmlinuz'), - mock.call(self.cmd.adapter, - 'overcloud-full-initrd', - './overcloud-full.initrd'), - mock.call(self.cmd.adapter, - 'overcloud-full', - './overcloud-full.raw'), - ]) diff --git a/tripleoclient/tests/v1/overcloud_netenv_validate/__init__.py b/tripleoclient/tests/v1/overcloud_netenv_validate/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_netenv_validate/fakes.py b/tripleoclient/tests/v1/overcloud_netenv_validate/fakes.py deleted file mode 100644 index ed8537b16..000000000 --- a/tripleoclient/tests/v1/overcloud_netenv_validate/fakes.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from osc_lib.tests import utils - - -class TestValidateOvercloudNetenv(utils.TestCommand): - - def setUp(self): - super(TestValidateOvercloudNetenv, self).setUp() diff --git a/tripleoclient/tests/v1/overcloud_netenv_validate/test_overcloud_netenv_validate.py b/tripleoclient/tests/v1/overcloud_netenv_validate/test_overcloud_netenv_validate.py deleted file mode 100644 index 2e418eda9..000000000 --- a/tripleoclient/tests/v1/overcloud_netenv_validate/test_overcloud_netenv_validate.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import os -import tempfile -from unittest import mock - -import yaml - -from tripleoclient.tests.v1.overcloud_netenv_validate import fakes -from tripleoclient.v1 import overcloud_netenv_validate - - -EMPTY_NETENV = """resource_registry: - OS::TripleO::BlockStorage::Net::SoftwareConfig: /tmp/foo - -parameter_defaults: - NeutronExternalNetworkBridge: "''" -""" - - -class TestValidateOvercloudNetenv(fakes.TestValidateOvercloudNetenv): - - def setUp(self): - super(TestValidateOvercloudNetenv, self).setUp() - - # Get the command object to test - self.cmd = overcloud_netenv_validate.ValidateOvercloudNetenv( - self.app, None) - - def temporary_nic_config_file(self, bridges): - nic_config = { - 'resources': { - 'OsNetConfigImpl': { - 'properties': { - 'config': { - 'str_replace': { - 'params': { - '$network_config': { - 'network_config': bridges, - } - } - } - } - } - } - } - } - tmp = tempfile.NamedTemporaryFile(mode='w', delete=False) - yaml.dump(nic_config, tmp) - tmp.close() - return tmp.name - - def test_cidr_no_overlapping_networks(self): - networks = [ - '172.17.0.0/24', - '172.16.0.0/24', - '172.17.1.0/24', - '172.17.2.0/24', - '10.1.2.0/24', - ] - self.cmd.error_count = 0 - self.cmd.check_cidr_overlap(networks) - self.assertEqual(0, self.cmd.error_count) - - def test_cidr_overlapping_networks(self): - networks = [ - '172.17.1.0/24', - '172.17.1.0/24', - '10.1.2.0/24', - ] - self.cmd.error_count = 0 - self.cmd.check_cidr_overlap(networks) - self.assertEqual(1, self.cmd.error_count) - - def test_cidr_nonnumerical_address(self): - networks = [ - 'nonsense', - ] - self.cmd.error_count = 0 - self.cmd.check_cidr_overlap(networks) - self.assertEqual(1, self.cmd.error_count) - - def test_cidr_address_outside_of_range(self): - networks = [ - '172.17.0.278/24', - ] - self.cmd.error_count = 0 - self.cmd.check_cidr_overlap(networks) - self.assertEqual(1, self.cmd.error_count) - - def test_vlan_ids_unique(self): - vlans = { - 'InternalApiNetworkVlanID': 201, - 'StorageNetworkVlanID': 202, - 'StorageMgmtNetworkVlanID': 203, - 'TenantNetworkVlanID': 204, - 'ExternalNetworkVlanID': 100, - } - self.cmd.error_count = 0 - self.cmd.check_vlan_ids(vlans) - self.assertEqual(0, self.cmd.error_count) - - def test_vlan_ids_duplicate(self): - vlans = { - 'InternalApiNetworkVlanID': 201, - 'StorageNetworkVlanID': 202, - 'StorageMgmtNetworkVlanID': 203, - 'TenantNetworkVlanID': 202, # conflicts with StorageNetworkVlanID - 'ExternalNetworkVlanID': 100, - } - self.cmd.error_count = 0 - self.cmd.check_vlan_ids(vlans) - self.assertEqual(1, self.cmd.error_count) - - def test_allocation_pools_pairing_no_overlap(self): - filedata = { - 'InternalApiNetCidr': '172.17.0.0/24', - 'StorageNetCidr': '172.18.0.0/24', - 'InternalApiAllocationPools': [ - {'start': '172.17.0.10', 'end': '172.17.0.200'}], - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'}], - } - pools = { - 'InternalApiAllocationPools': [ - {'start': '172.17.0.10', 'end': '172.17.0.200'}], - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'}], - } - self.cmd.error_count = 0 - self.cmd.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(0, self.cmd.error_count) - - def test_allocation_pools_pairing_inverse_range(self): - filedata = { - 'InternalApiNetCidr': '172.17.0.0/24', - 'StorageNetCidr': '172.18.0.0/24', - 'InternalApiAllocationPools': [ - {'start': '172.17.0.200', 'end': '172.17.0.10'}], - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'}], - } - pools = { 
- 'InternalApiAllocationPools': [ - {'start': '172.17.0.200', 'end': '172.17.0.10'}], - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'}], - } - self.cmd.error_count = 0 - self.cmd.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(1, self.cmd.error_count) - - def test_allocation_pools_pairing_pool_outside_subnet(self): - filedata = { - 'InternalApiNetCidr': '172.17.0.0/24', - 'InternalApiAllocationPools': [ - {'start': '172.16.0.10', 'end': '172.16.0.200'}], - } - pools = { - 'InternalApiAllocationPools': [ - {'start': '172.16.0.10', 'end': '172.16.0.200'}], - } - self.cmd.error_count = 0 - self.cmd.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(1, self.cmd.error_count) - - def test_allocation_pools_pairing_invalid_cidr(self): - filedata = { - 'InternalApiNetCidr': '172.17.0.298/24', - 'InternalApiAllocationPools': [ - {'start': '172.17.0.10', 'end': '172.17.0.200'}], - } - pools = { - 'InternalApiAllocationPools': [ - {'start': '172.17.0.10', 'end': '172.17.0.200'}], - } - self.cmd.error_count = 0 - self.cmd.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(1, self.cmd.error_count) - - def test_allocation_pools_pairing_invalid_range(self): - filedata = { - 'InternalApiNetCidr': '172.17.0.0/24', - 'InternalApiAllocationPools': [ - {'start': '172.17.0.10', 'end': '172.17.0.287'}], - } - pools = { - 'InternalApiAllocationPools': [ - {'start': '172.17.0.10', 'end': '172.17.0.287'}], - } - self.cmd.error_count = 0 - self.cmd.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(1, self.cmd.error_count) - - def test_nic_nonexistent_path(self): - self.cmd.error_count = 0 - self.cmd.NIC_validate('OS::TripleO::Controller::Net::SoftwareConfig', - 'this file that not exist') - self.assertEqual(1, self.cmd.error_count) - - def test_nic_valid_file(self): - bridges = [{ - 'type': 'ovs_bridge', - 'name': 'br-storage', - 'members': [ - {'type': 'interface', 'name': 'eth0'}, - {'type': 'interface', 'name': 'eth1'}, - {'type': 'ovs_bond', 'name': 'bond1'} - ], - }] - tmp = self.temporary_nic_config_file(bridges) - self.cmd.error_count = 0 - self.cmd.NIC_validate( - 'OS::TripleO::Controller::Net::SoftwareConfig', tmp) - os.unlink(tmp) - self.assertEqual(0, self.cmd.error_count) - - def test_nic_no_bond_too_many_interfaces(self): - bridges = [{ - 'type': 'ovs_bridge', - 'name': 'br-storage', - 'members': [ - {'type': 'interface', 'name': 'eth0'}, - {'type': 'interface', 'name': 'eth1'}, - ], - }] - tmp = self.temporary_nic_config_file(bridges) - self.cmd.error_count = 0 - self.cmd.NIC_validate( - 'OS::TripleO::Controller::Net::SoftwareConfig', tmp) - os.unlink(tmp) - self.assertEqual(1, self.cmd.error_count) - - def test_nic_two_bonds(self): - bridges = [{ - 'type': 'ovs_bridge', - 'name': 'br-storage', - 'members': [ - {'type': 'interface', 'name': 'eth0'}, - {'type': 'interface', 'name': 'eth1'}, - {'type': 'ovs_bond', 'name': 'bond1'}, - {'type': 'ovs_bond', 'name': 'bond2'}, - ], - }] - tmp = self.temporary_nic_config_file(bridges) - self.cmd.error_count = 0 - self.cmd.NIC_validate( - 'OS::TripleO::Controller::Net::SoftwareConfig', tmp) - os.unlink(tmp) - self.assertEqual(1, self.cmd.error_count) - - @mock.patch('tripleoclient.v1.overcloud_netenv_validate.' 
-                'ValidateOvercloudNetenv.NIC_validate', autospec=True)
-    def test_command(self, mock_nic_validate):
-        """Test the command with a minimal file.
-
-        NIC validation is mocked out, so take_action should not raise.
-        """
-        with tempfile.NamedTemporaryFile('wt') as net_file:
-            net_file.write(EMPTY_NETENV)
-            net_file.flush()
-
-            arglist = ['--file', net_file.name]
-            verifylist = [
-                ('netenv', net_file.name),
-            ]
-
-            parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-            # Validating a minimal file shouldn't raise errors.
-            self.cmd.take_action(parsed_args)
diff --git a/tripleoclient/tests/v1/overcloud_node/__init__.py b/tripleoclient/tests/v1/overcloud_node/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tripleoclient/tests/v1/overcloud_node/fakes.py b/tripleoclient/tests/v1/overcloud_node/fakes.py
deleted file mode 100644
index 212fcede3..000000000
--- a/tripleoclient/tests/v1/overcloud_node/fakes.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-import uuid
-
-from unittest import mock
-
-from tripleoclient.tests import fakes
-
-
-class TestDeleteNode(fakes.FakePlaybookExecution):
-
-    def setUp(self):
-        super(TestDeleteNode, self).setUp()
-
-
-class TestOvercloudNode(fakes.FakePlaybookExecution):
-
-    def setUp(self):
-        super(TestOvercloudNode, self).setUp()
-
-        self.mock_playbook = mock.patch(
-            'tripleoclient.utils.run_ansible_playbook',
-            autospec=True
-        )
-        self.mock_playbook.start()
-        self.addCleanup(self.mock_playbook.stop)
-
-
-def make_fake_machine(machine_name, provision_state='manageable',
-                      is_maintenance=False, machine_id=None):
-    if not machine_id:
-        machine_id = uuid.uuid4().hex
-    return fakes.FakeMachine(id=machine_id, name=machine_name,
-                             provision_state=provision_state,
-                             is_maintenance=is_maintenance)
diff --git a/tripleoclient/tests/v1/overcloud_node/test_overcloud_node.py b/tripleoclient/tests/v1/overcloud_node/test_overcloud_node.py
deleted file mode 100644
index 5f97dfa0f..000000000
--- a/tripleoclient/tests/v1/overcloud_node/test_overcloud_node.py
+++ /dev/null
@@ -1,1505 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
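-#
-# Unit tests for the "openstack overcloud node" commands (delete, provide,
-# clean, import, configure, discover and "extract provisioned"), exercised
-# through mocked Ansible playbook runs and OpenStack SDK connections.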
-# - -import collections -import copy -import fixtures -import json -import os -import tempfile -from unittest import mock - -import openstack - -from osc_lib import exceptions as oscexc -from osc_lib.tests import utils as test_utils -from oslo_utils import units -import yaml - -from tripleoclient import exceptions -from tripleoclient.tests.v1.overcloud_node import fakes -from tripleoclient.v1 import overcloud_node -from tripleoclient.v2 import overcloud_node as overcloud_node_v2 - - -class TestDeleteNode(fakes.TestDeleteNode): - - def setUp(self): - super(TestDeleteNode, self).setUp() - - # Get the command object to test - self.cmd = overcloud_node.DeleteNode(self.app, None) - self.cmd.app_args = mock.Mock(verbose_level=1) - self.tripleoclient = mock.Mock() - - self.stack_name = self.app.client_manager.orchestration.stacks.get - stack = self.stack_name.return_value = mock.Mock( - stack_name="overcloud" - ) - stack.output_show.return_value = {'output': {'output_value': []}} - - wait_stack = mock.patch( - 'tripleoclient.utils.wait_for_stack_ready', - autospec=True - ) - wait_stack.start() - wait_stack.return_value = None - self.addCleanup(wait_stack.stop) - self.app.client_manager.compute.servers.get.return_value = None - - @mock.patch('heatclient.common.event_utils.get_events', - autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_node_delete(self, mock_playbook, - mock_get_events): - argslist = ['instance1', 'instance2', '--stack', 'overcast', - '--timeout', '90', '--yes'] - verifylist = [ - ('stack', 'overcast'), - ('nodes', ['instance1', 'instance2']) - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=False) - def test_node_delete_no_confirm(self, confirm_mock): - argslist = ['instance1', 'instance2', '--stack', 'overcast', - '--timeout', '90'] - verifylist = [ - ('stack', 'overcast'), - ('nodes', ['instance1', 'instance2']) - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - - self.assertRaises(oscexc.CommandError, - self.cmd.take_action, - parsed_args) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True, - side_effect=exceptions.InvalidConfiguration) - def test_node_wrong_stack(self, mock_playbook): - argslist = ['instance1', '--stack', 'overcast', '--yes'] - verifylist = [ - ('stack', 'overcast'), - ('nodes', ['instance1', ]) - ] - self.stack_name.return_value = None - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - - self.assertRaises(exceptions.InvalidConfiguration, - self.cmd.take_action, - parsed_args) - - @mock.patch('heatclient.common.event_utils.get_events', - autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_node_delete_without_stack(self, mock_playbook, - mock_get_events): - arglist = ['instance1', '--yes'] - - verifylist = [ - ('stack', 'overcloud'), - ('nodes', ['instance1']), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch('tripleoclient.utils.get_key') - @mock.patch('tripleoclient.utils.get_default_working_dir') - @mock.patch('heatclient.common.event_utils.get_events', - autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - @mock.patch('tripleoclient.utils.tempfile') - def test_node_delete_baremetal_deployment(self, - mock_tempfile, - mock_playbook, - 
mock_get_events, - mock_dir, - mock_key): - - bm_yaml = [{ - 'name': 'Compute', - 'count': 5, - 'instances': [{ - 'name': 'baremetal-2', - 'hostname': 'overcast-compute-0', - 'provisioned': False - }], - }, { - 'name': 'Controller', - 'count': 2, - 'instances': [{ - 'name': 'baremetal-1', - 'hostname': 'overcast-controller-1', - 'provisioned': False - }] - }] - - tmp = tempfile.mkdtemp() - mock_tempfile.mkdtemp.side_effect = [ - tmp, - tempfile.mkdtemp(), - tempfile.mkdtemp(), - tempfile.mkdtemp(), - tempfile.mkdtemp() - ] - - mock_dir.return_value = "/home/stack/overcloud-deploy" - ansible_dir = "{}/config-download/overcast".format( - mock_dir.return_value - ) - - inventory = "{}/tripleo-ansible-inventory.yaml".format( - ansible_dir - ) - - ansible_cfg = "{}/ansible.cfg".format( - ansible_dir - ) - - mock_key.return_value = '/home/stack/.ssh/id_rsa_tripleo' - - unprovision_confirm = os.path.join(tmp, 'unprovision_confirm.json') - with open(unprovision_confirm, 'w') as confirm: - confirm.write(json.dumps([ - { - 'hostname': 'overcast-controller-1', - 'name': 'baremetal-1', - 'id': 'aaaa' - }, { - 'hostname': 'overcast-compute-0', - 'name': 'baremetal-2', - 'id': 'bbbb' - } - ])) - - with tempfile.NamedTemporaryFile(mode='w') as inp: - yaml.dump(bm_yaml, inp, encoding='utf-8') - inp.flush() - - argslist = ['--baremetal-deployment', inp.name, '--stack', - 'overcast', '--overcloud-ssh-port-timeout', '42', - '--timeout', '90', '--yes'] - verifylist = [ - ('stack', 'overcast'), - ('overcloud_ssh_port_timeout', 42), - ('baremetal_deployment', inp.name) - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - - self.cmd.take_action(parsed_args) - - # Verify - mock_playbook.assert_has_calls([ - mock.call( - playbook='cli-overcloud-node-unprovision.yaml', - inventory='localhost,', - verbosity=mock.ANY, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - timeout=mock.ANY, - extra_vars={ - 'stack_name': 'overcast', - 'baremetal_deployment': [{ - 'count': 5, - 'instances': [{ - 'hostname': 'overcast-compute-0', - 'name': 'baremetal-2', - 'provisioned': False - }], - 'name': 'Compute' - }, { - 'count': 2, - 'instances': [{ - 'hostname': 'overcast-controller-1', - 'name': 'baremetal-1', - 'provisioned': False - }], 'name': 'Controller' - }], - 'prompt': True, - 'unprovision_confirm': unprovision_confirm, - }, - ), - mock.call( - playbook='scale_playbook.yaml', - inventory=inventory, - workdir=ansible_dir, - playbook_dir=ansible_dir, - ansible_cfg=ansible_cfg, - ssh_user='tripleo-admin', - limit_hosts='overcast-controller-1:overcast-compute-0', - reproduce_command=True, - ignore_unreachable=True, - timeout=mock.ANY, - extra_env_variables={ - "ANSIBLE_BECOME": True, - "ANSIBLE_PRIVATE_KEY_FILE": - "/home/stack/.ssh/id_rsa_tripleo" - } - ), - mock.call( - inventory='localhost,', - playbook='cli-overcloud-node-unprovision.yaml', - verbosity=mock.ANY, - workdir=mock.ANY, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - timeout=mock.ANY, - extra_vars={ - 'stack_name': 'overcast', - 'baremetal_deployment': [{ - 'count': 5, - 'instances': [{ - 'hostname': 'overcast-compute-0', - 'name': 'baremetal-2', - 'provisioned': False - }], 'name': 'Compute' - }, { - 'count': 2, - 'instances': [{ - 'hostname': 'overcast-controller-1', - 'name': 'baremetal-1', - 'provisioned': False - }], - 'name': 'Controller' - }], - 'prompt': False, - 'manage_network_ports': True - }, - ) - ]) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - 
@mock.patch('tripleoclient.utils.tempfile') - def test_nodes_to_delete(self, mock_tempfile, mock_playbook): - bm_yaml = [{ - 'name': 'Compute', - 'count': 5, - 'instances': [{ - 'name': 'baremetal-2', - 'hostname': 'overcast-compute-0', - 'provisioned': False - }], - }, { - 'name': 'Controller', - 'count': 2, - 'instances': [{ - 'name': 'baremetal-1', - 'hostname': 'overcast-controller-1', - 'provisioned': False - }] - }] - - tmp = tempfile.mkdtemp() - mock_tempfile.mkdtemp.return_value = tmp - - unprovision_confirm = os.path.join(tmp, 'unprovision_confirm.json') - with open(unprovision_confirm, 'w') as confirm: - confirm.write(json.dumps([ - { - 'hostname': 'compute-0', - 'name': 'baremetal-1', - 'id': 'aaaa' - }, { - 'hostname': 'controller-0', - 'name': 'baremetal-2', - 'id': 'bbbb' - } - ])) - - argslist = ['--baremetal-deployment', '/foo/bm_deploy.yaml'] - verifylist = [ - ('baremetal_deployment', '/foo/bm_deploy.yaml') - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - nodes_text, nodes = self.cmd._nodes_to_delete(parsed_args, bm_yaml) - expected = '''+--------------+-------------+------+ -| hostname | name | id | -+--------------+-------------+------+ -| compute-0 | baremetal-1 | aaaa | -| controller-0 | baremetal-2 | bbbb | -+--------------+-------------+------+ -''' - self.assertEqual(expected, nodes_text) - self.assertEqual(['compute-0', 'controller-0'], nodes) - - def test_check_skiplist_exists(self): - mock_warning = mock.MagicMock() - mock_log = mock.MagicMock() - mock_log.warning = mock_warning - env = {'parameter_defaults': {}} - - old_logger = self.cmd.log - self.cmd.log = mock_log - self.cmd._check_skiplist_exists(env) - self.cmd.log = old_logger - mock_warning.assert_not_called() - - def test_check_skiplist_exists_empty(self): - mock_warning = mock.MagicMock() - mock_log = mock.MagicMock() - mock_log.warning = mock_warning - env = {'parameter_defaults': {'DeploymentServerBlacklist': []}} - - old_logger = self.cmd.log - self.cmd.log = mock_log - self.cmd._check_skiplist_exists(env) - self.cmd.log = old_logger - mock_warning.assert_not_called() - - def test_check_skiplist_exists_warns(self): - mock_warning = mock.MagicMock() - mock_log = mock.MagicMock() - mock_log.warning = mock_warning - env = {'parameter_defaults': {'DeploymentServerBlacklist': ['a']}} - - old_logger = self.cmd.log - self.cmd.log = mock_log - self.cmd._check_skiplist_exists(env) - self.cmd.log = old_logger - expected_message = ('[WARNING] DeploymentServerBlacklist is ignored ' - 'when executing scale down actions. 
If the ' - 'node(s) being removed should *NOT* have any ' - 'actions executed on them, please shut them off ' - 'prior to their removal.') - mock_warning.assert_called_once_with(expected_message) - - -@mock.patch.object(openstack.baremetal.v1._proxy, 'Proxy', - autospec=True, name='mock_bm') -@mock.patch('openstack.config', autospec=True, - name='mock_conf') -@mock.patch('openstack.connect', autospec=True, - name='mock_connect') -@mock.patch.object(openstack.connection, - 'Connection', autospec=True) -class TestProvideNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestProvideNode, self).setUp() - # Get the command object to test - self.cmd = overcloud_node.ProvideNode(self.app, None) - - iterate_timeout = mock.MagicMock() - iterate_timeout.start() - - self.fake_baremetal_node = fakes.make_fake_machine( - machine_name='node1', - machine_id='4e540e11-1366-4b57-85d5-319d168d98a1' - ) - self.fake_baremetal_node2 = fakes.make_fake_machine( - machine_name='node2', - machine_id='9070e42d-1ad7-4bd0-b868-5418bc9c7176' - ) - - def test_provide_all_manageable_nodes(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node2]) - ] - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2] - - parsed_args = self.check_parser(self.cmd, - ['--all-manageable'], - [('all_manageable', True)]) - self.cmd.take_action(parsed_args) - - def test_provide_one_node(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - node_id = 'node_uuid1' - - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node] - - parsed_args = self.check_parser(self.cmd, - [node_id], - [('node_uuids', [node_id])]) - self.cmd.take_action(parsed_args) - - def test_provide_multiple_nodes(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - node_id1 = 'node_uuid1' - node_id2 = 'node_uuid2' - - argslist = [node_id1, node_id2] - verifylist = [('node_uuids', [node_id1, node_id2])] - - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2 - ] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - -@mock.patch.object(openstack.baremetal.v1._proxy, 'Proxy', - autospec=True, name='mock_bm') -@mock.patch('openstack.config', autospec=True, - name='mock_conf') -@mock.patch('openstack.connect', autospec=True, - name='mock_connect') -@mock.patch.object(openstack.connection, - 'Connection', autospec=True) -class TestCleanNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestCleanNode, self).setUp() - - # Get the command object to test - self.cmd = overcloud_node.CleanNode(self.app, None) - - self.fake_baremetal_node = fakes.make_fake_machine( - machine_name='node1', - machine_id='4e540e11-1366-4b57-85d5-319d168d98a1' - ) - self.fake_baremetal_node2 = fakes.make_fake_machine( - machine_name='node2', - machine_id='9070e42d-1ad7-4bd0-b868-5418bc9c7176' - ) - - def _check_clean_all_manageable(self, parsed_args, mock_conn, - mock_connect, mock_conf, - mock_bm, - provide=False): - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node]) - ] - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - 
self.fake_baremetal_node] - self.cmd.take_action(parsed_args) - - def _check_clean_nodes(self, parsed_args, nodes, mock_conn, - mock_connect, mock_conf, - mock_bm, provide=False): - self.cmd.take_action(parsed_args) - - def test_clean_all_manageable_nodes_without_provide(self, mock_conn, - mock_connect, - mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.nodes.return_value = iter([ - self.fake_baremetal_node - ]) - parsed_args = self.check_parser(self.cmd, - ['--all-manageable'], - [('all_manageable', True)]) - self._check_clean_all_manageable(parsed_args, mock_conn, - mock_connect, mock_conf, - mock_bm, provide=False) - - def test_clean_all_manageable_nodes_with_provide(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node])] - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node] - parsed_args = self.check_parser(self.cmd, - ['--all-manageable', '--provide'], - [('all_manageable', True), - ('provide', True)]) - self._check_clean_all_manageable(parsed_args, mock_conn, - mock_connect, mock_conf, - mock_bm, provide=False) - - def test_clean_nodes_without_provide(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - nodes = ['node_uuid1', 'node_uuid2'] - parsed_args = self.check_parser(self.cmd, - nodes, - [('node_uuids', nodes)]) - self._check_clean_nodes(parsed_args, nodes, mock_conn, - mock_connect, mock_conf, - mock_bm, provide=False) - - def test_clean_nodes_with_provide(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - - nodes = ['node_uuid1', 'node_uuid2'] - argslist = nodes + ['--provide'] - - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2 - ] - - parsed_args = self.check_parser(self.cmd, - argslist, - [('node_uuids', nodes), - ('provide', True)]) - self._check_clean_nodes(parsed_args, nodes, mock_conn, - mock_connect, mock_conf, - mock_bm, provide=False) - - -class TestImportNodeMultiArch(fakes.TestOvercloudNode): - - def setUp(self): - super(TestImportNodeMultiArch, self).setUp() - - self.nodes_list = [{ - "pm_user": "stack", - "pm_addr": "192.168.122.1", - "pm_password": "KEY1", - "pm_type": "pxe_ssh", - "mac": [ - "00:0b:d0:69:7e:59" - ], - }, { - "pm_user": "stack", - "pm_addr": "192.168.122.2", - "pm_password": "KEY2", - "pm_type": "pxe_ssh", - "arch": "x86_64", - "mac": [ - "00:0b:d0:69:7e:58" - ] - }, { - "pm_user": "stack", - "pm_addr": "192.168.122.3", - "pm_password": "KEY3", - "pm_type": "pxe_ssh", - "arch": "x86_64", - "platform": "SNB", - "mac": [ - "00:0b:d0:69:7e:58" - ] - }] - self.json_file = tempfile.NamedTemporaryFile( - mode='w', delete=False, suffix='.json') - json.dump(self.nodes_list, self.json_file) - self.json_file.close() - self.addCleanup(os.unlink, self.json_file.name) - - # Get the command object to test - self.cmd = overcloud_node_v2.ImportNode(self.app, None) - - image = collections.namedtuple('image', ['id', 'name']) - self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.images.list.return_value = [ - image(id=3, name='overcloud-full'), - image(id=6, name='x86_64-overcloud-full'), - image(id=9, name='SNB-x86_64-overcloud-full'), - ] - - self.http_boot = 
'/var/lib/ironic/httpboot' - - self.mock_playbook = mock.patch( - "tripleoclient.utils.run_ansible_playbook", spec=True) - self.mock_run_ansible_playbook = self.mock_playbook.start() - self.addCleanup(self.mock_playbook.stop) - - existing = ['agent', 'x86_64/agent', 'SNB-x86_64/agent'] - existing = {os.path.join(self.http_boot, name + ext) - for name in existing for ext in ('.kernel', '.ramdisk')} - - self.useFixture(fixtures.MockPatch( - 'os.path.exists', autospec=True, - side_effect=lambda path: path in existing)) - - def _check_workflow_call(self, parsed_args, introspect=False, - provide=False, local=None, no_deploy_image=False): - file_return_nodes = [ - { - 'uuid': 'MOCK_NODE_UUID' - } - ] - mock_open = mock.mock_open(read_data=json.dumps(file_return_nodes)) - with mock.patch('builtins.open', mock_open): - self.cmd.take_action(parsed_args) - - nodes_list = copy.deepcopy(self.nodes_list) - if not no_deploy_image: - nodes_list[0]['kernel_id'] = ( - 'file://%s/agent.kernel' % self.http_boot) - nodes_list[0]['ramdisk_id'] = ( - 'file://%s/agent.ramdisk' % self.http_boot) - nodes_list[1]['kernel_id'] = ( - 'file://%s/x86_64/agent.kernel' % self.http_boot) - nodes_list[1]['ramdisk_id'] = ( - 'file://%s/x86_64/agent.ramdisk' % self.http_boot) - nodes_list[2]['kernel_id'] = ( - 'file://%s/SNB-x86_64/agent.kernel' % self.http_boot) - nodes_list[2]['ramdisk_id'] = ( - 'file://%s/SNB-x86_64/agent.ramdisk' % self.http_boot) - - if introspect: - self.mock_run_ansible_playbook.assert_called_with( - extra_vars={ - 'node_uuids': ['MOCK_NODE_UUID']}, - inventory='localhost,', - playbook='cli-overcloud-node-provide.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - workdir=mock.ANY, - ) - - def test_import_only(self): - argslist = [self.json_file.name] - verifylist = [('introspect', False), - ('provide', False)] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self._check_workflow_call(parsed_args) - - def test_import_with_netboot(self): - arglist = [self.json_file.name, '--instance-boot-option', 'netboot'] - verifylist = [('instance_boot_option', 'netboot')] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self._check_workflow_call(parsed_args, local=False) - - def test_import_with_no_deployed_image(self): - arglist = [self.json_file.name, '--no-deploy-image'] - verifylist = [('no_deploy_image', True)] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self._check_workflow_call(parsed_args, no_deploy_image=True) - - -@mock.patch.object(openstack.baremetal.v1._proxy, 'Proxy', - autospec=True, name='mock_bm') -@mock.patch('openstack.config', autospec=True, - name='mock_conf') -@mock.patch('openstack.connect', autospec=True, - name='mock_connect') -@mock.patch.object(openstack.connection, - 'Connection', autospec=True) -class TestConfigureNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestConfigureNode, self).setUp() - - # Get the command object to test - self.cmd = overcloud_node.ConfigureNode(self.app, None) - - self.http_boot = '/var/lib/ironic/httpboot' - self.workflow_input = { - 'kernel_name': 'file://%s/agent.kernel' % self.http_boot, - 'ramdisk_name': 'file://%s/agent.ramdisk' % self.http_boot, - 'instance_boot_option': None, - 'root_device': None, - 'root_device_minimum_size': 4, - 'overwrite_root_device_hints': False - } - # Mock disks - self.disks = [ - {'name': '/dev/sda', 'size': 11 * units.Gi}, - {'name': '/dev/sdb', 'size': 2 * units.Gi}, - {'name': '/dev/sdc', 'size': 5 * units.Gi}, - {'name': '/dev/sdd', 
'size': 21 * units.Gi}, - {'name': '/dev/sde', 'size': 13 * units.Gi}, - ] - - for i, disk in enumerate(self.disks): - disk['wwn'] = 'wwn%d' % i - disk['serial'] = 'serial%d' % i - - self.fake_baremetal_node = fakes.make_fake_machine( - machine_name='node1', - machine_id='4e540e11-1366-4b57-85d5-319d168d98a1' - ) - self.fake_baremetal_node2 = fakes.make_fake_machine( - machine_name='node2', - machine_id='9070e42d-1ad7-4bd0-b868-5418bc9c7176' - ) - - def test_configure_all_manageable_nodes(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node])] - parsed_args = self.check_parser(self.cmd, - ['--all-manageable'], - [('all_manageable', True)]) - self.cmd.take_action(parsed_args) - - def test_configure_specified_nodes(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - argslist = ['node_uuid1', 'node_uuid2'] - verifylist = [('node_uuids', ['node_uuid1', 'node_uuid2'])] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - def test_configure_no_node_or_flag_specified(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - self.assertRaises(test_utils.ParserException, - self.check_parser, - self.cmd, [], []) - - def test_configure_uuids_and_all_both_specified(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - argslist = ['node_id1', 'node_id2', '--all-manageable'] - verifylist = [('node_uuids', ['node_id1', 'node_id2']), - ('all_manageable', True)] - self.assertRaises(test_utils.ParserException, - self.check_parser, - self.cmd, argslist, verifylist) - - def test_configure_kernel_and_ram(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal_introspection = mock_bm - - introspector_client = mock_bm.baremetal_introspection - introspector_client.get_introspection_data = mock_bm - introspector_client.get_introspection_data.return_value = { - 'inventory': {'disks': self.disks} - } - - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node])] - - argslist = ['--all-manageable', '--deploy-ramdisk', 'test_ramdisk', - '--deploy-kernel', 'test_kernel'] - verifylist = [('deploy_kernel', 'test_kernel'), - ('deploy_ramdisk', 'test_ramdisk')] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - def test_configure_instance_boot_option(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node])] - argslist = ['--all-manageable', '--instance-boot-option', 'netboot'] - verifylist = [('instance_boot_option', 'netboot')] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - def test_configure_root_device(self, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal_introspection = mock_bm - - introspector_client = mock_bm.baremetal_introspection - introspector_client.get_introspection_data = mock_bm - introspector_client.get_introspection_data.return_value = { - 'inventory': {'disks': self.disks} - } - 
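-        # The fake introspection payload above supplies the disk inventory
-        # consumed by the 'smallest' root device strategy exercised below.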
mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node])] - argslist = ['--all-manageable', - '--root-device', 'smallest', - '--root-device-minimum-size', '2', - '--overwrite-root-device-hints'] - verifylist = [('root_device', 'smallest'), - ('root_device_minimum_size', 2), - ('overwrite_root_device_hints', True)] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - @mock.patch('tripleoclient.workflows.baremetal.' - '_apply_root_device_strategy') - def test_configure_specified_node_with_all_arguments( - self, mock_root_device, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal_introspection = mock_bm - - introspector_client = mock_bm.baremetal_introspection - introspector_client.get_introspection_data = mock_bm - introspector_client.get_introspection_data.return_value = { - 'inventory': {'disks': self.disks} - } - - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node]), - iter([self.fake_baremetal_node])] - - argslist = ['node_id', - '--deploy-kernel', 'test_kernel', - '--deploy-ramdisk', 'test_ramdisk', - '--instance-boot-option', 'netboot', - '--root-device', 'smallest', - '--root-device-minimum-size', '2', - '--overwrite-root-device-hints'] - verifylist = [('node_uuids', ['node_id']), - ('deploy_kernel', 'test_kernel'), - ('deploy_ramdisk', 'test_ramdisk'), - ('instance_boot_option', 'netboot'), - ('root_device', 'smallest'), - ('root_device_minimum_size', 2), - ('overwrite_root_device_hints', True)] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - -@mock.patch.object(openstack.baremetal.v1._proxy, 'Proxy', autospec=True, - name="mock_bm") -@mock.patch('openstack.config', autospec=True, name='mock_conf') -@mock.patch('openstack.connect', autospec=True, name='mock_connect') -@mock.patch.object(openstack.connection, 'Connection', autospec=True) -@mock.patch('tripleo_common.utils.nodes._populate_node_mapping', - name='mock_nodemap') -@mock.patch('tripleo_common.utils.nodes.register_all_nodes', - name='mock_tcnode') -@mock.patch('oslo_concurrency.processutils.execute', - name="mock_subproc") -class TestDiscoverNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestDiscoverNode, self).setUp() - - self.cmd = overcloud_node.DiscoverNode(self.app, None) - - self.gcn = mock.patch( - 'tripleoclient.workflows.baremetal._get_candidate_nodes', - autospec=True - ) - self.gcn.start() - self.addCleanup(self.gcn.stop) - - self.http_boot = '/var/lib/ironic/httpboot' - self.fake_baremetal_node = fakes.make_fake_machine( - machine_name='node1', - machine_id='4e540e11-1366-4b57-85d5-319d168d98a1' - ) - self.fake_baremetal_node2 = fakes.make_fake_machine( - machine_name='node2', - machine_id='9070e42d-1ad7-4bd0-b868-5418bc9c7176' - ) - - def test_with_ip_range(self, mock_subproc, mock_tcnode, - mock_nodemap, mock_conn, - mock_connect, mock_conf, - mock_bm): - argslist = ['--range', '10.0.0.0/24', - '--credentials', 'admin:password'] - verifylist = [('ip_addresses', '10.0.0.0/24'), - ('credentials', ['admin:password'])] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - def test_with_address_list(self, mock_subproc, mock_tcnode, - mock_nodemap, mock_conn, - mock_connect, mock_conf, - mock_bm): - argslist = ['--ip', '10.0.0.1', '--ip', '10.0.0.2', - '--credentials', 
'admin:password'] - verifylist = [('ip_addresses', ['10.0.0.1', '10.0.0.2']), - ('credentials', ['admin:password'])] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - def test_with_all_options(self, mock_subproc, mock_tcnode, - mock_nodemap, mock_conn, - mock_connect, mock_conf, - mock_bm): - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2, - self.fake_baremetal_node, - self.fake_baremetal_node2 - ] - argslist = ['--range', '10.0.0.0/24', - '--credentials', 'admin:password', - '--credentials', 'admin2:password2', - '--port', '623', '--port', '6230', - '--introspect', '--provide', '--run-validations', - '--no-deploy-image', '--instance-boot-option', 'netboot', - '--concurrency', '10'] - verifylist = [('ip_addresses', '10.0.0.0/24'), - ('credentials', ['admin:password', 'admin2:password2']), - ('port', [623, 6230]), - ('introspect', True), - ('run_validations', True), - ('concurrency', 10), - ('provide', True), - ('no_deploy_image', True), - ('instance_boot_option', 'netboot')] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - - -class TestExtractProvisionedNode(test_utils.TestCommand): - - def setUp(self): - super(TestExtractProvisionedNode, self).setUp() - - self.orchestration = mock.Mock() - self.app.client_manager.orchestration = self.orchestration - - self.baremetal = mock.Mock() - self.app.client_manager.baremetal = self.baremetal - - self.network = mock.Mock() - self.app.client_manager.network = self.network - - self.cmd = overcloud_node.ExtractProvisionedNode(self.app, None) - - roles_data = [ - {'name': 'Controller', - 'default_route_networks': ['External'], - 'networks_skip_config': ['Tenant']}, - {'name': 'Compute'} - ] - - networks_data = [] - - self.stack_dict = { - 'parameters': { - 'ComputeHostnameFormat': '%stackname%-novacompute-%index%', - 'ControllerHostnameFormat': '%stackname%-controller-%index%', - 'ControllerNetworkConfigTemplate': 'templates/controller.j2' - }, - 'outputs': [{ - 'output_key': 'TripleoHeatTemplatesJinja2RenderingDataSources', - 'output_value': { - 'roles_data': roles_data, - 'networks_data': networks_data, - } - }, { - 'output_key': 'AnsibleHostVarsMap', - 'output_value': { - 'Compute': [ - 'overcloud-novacompute-0' - ], - 'Controller': [ - 'overcloud-controller-0', - 'overcloud-controller-1', - 'overcloud-controller-2' - ], - } - }, { - 'output_key': 'RoleNetIpMap', - 'output_value': { - 'Compute': { - 'ctlplane': ['192.168.26.11'], - 'internal_api': ['172.17.1.23'], - }, - 'Controller': { - 'ctlplane': ['192.168.25.21', - '192.168.25.25', - '192.168.25.28'], - 'external': ['10.0.0.199', - '10.0.0.197', - '10.0.0.191'], - 'internal_api': ['172.17.0.37', - '172.17.0.33', - '172.17.0.39'], - } - } - }] - } - - self.nodes = [ - mock.Mock(), - mock.Mock(), - mock.Mock(), - mock.Mock() - ] - self.nodes[0].name = 'bm-0' - self.nodes[0].id = 'bm-0-uuid' - self.nodes[0].resource_class = 'controller' - self.nodes[1].name = 'bm-1' - self.nodes[1].id = 'bm-1-uuid' - self.nodes[1].resource_class = 'controller' - self.nodes[2].name = 'bm-2' - self.nodes[2].id = 'bm-2-uuid' - self.nodes[2].resource_class = None - self.nodes[3].name = 'bm-3' - self.nodes[3].id = 'bm-3-uuid' - self.nodes[3].resource_class = 'compute' - - self.nodes[0].instance_info = { - 'display_name': 'overcloud-controller-0'} - self.nodes[1].instance_info = { - 
'display_name': 'overcloud-controller-1'} - self.nodes[2].instance_info = { - 'display_name': 'overcloud-controller-2'} - self.nodes[3].instance_info = { - 'display_name': 'overcloud-novacompute-0'} - - self.networks = [ - mock.Mock(), # ctlplane - mock.Mock(), # external - mock.Mock(), # internal_api - ] - self.ctlplane_net = self.networks[0] - self.external_net = self.networks[1] - self.internal_api_net = self.networks[2] - - self.ctlplane_net.id = 'ctlplane_id' - self.ctlplane_net.name = 'ctlplane' - self.ctlplane_net.subnet_ids = ['ctlplane_a_id', - 'ctlplane_b_id'] - self.external_net.id = 'external_id' - self.external_net.name = 'external' - self.external_net.subnet_ids = ['external_a_id'] - self.internal_api_net.id = 'internal_api_id' - self.internal_api_net.name = 'internal_api' - self.internal_api_net.subnet_ids = ['internal_api_a_id', - 'internal_api_b_id'] - - self.subnets = [ - mock.Mock(), # ctlplane_a - mock.Mock(), # ctlplane_b - mock.Mock(), # external_a - mock.Mock(), # internal_api_a - mock.Mock(), # internal_api_b - ] - self.ctlplane_a = self.subnets[0] - self.ctlplane_b = self.subnets[1] - self.external_a = self.subnets[2] - self.int_api_a = self.subnets[3] - self.int_api_b = self.subnets[4] - - self.ctlplane_a.id = 'ctlplane_a_id' - self.ctlplane_a.name = 'ctlplane_a' - self.ctlplane_a.cidr = '192.168.25.0/24' - self.ctlplane_b.id = 'ctlplane_b_id' - self.ctlplane_b.name = 'ctlplane_b' - self.ctlplane_b.cidr = '192.168.26.0/24' - - self.external_a.id = 'external_a_id' - self.external_a.name = 'external_a' - self.external_a.cidr = '10.0.0.0/24' - - self.int_api_a.id = 'internal_api_a_id' - self.int_api_a.name = 'internal_api_a' - self.int_api_a.cidr = '172.17.0.0/24' - self.int_api_b.id = 'internal_api_b_id' - self.int_api_b.name = 'internal_api_b' - self.int_api_b.cidr = '172.17.1.0/24' - - self.network.find_network.side_effect = [ - # compute-0 - self.ctlplane_net, self.internal_api_net, - # controller-0 - self.ctlplane_net, self.external_net, self.internal_api_net, - # controller-1 - self.ctlplane_net, self.external_net, self.internal_api_net, - # controller-2 - self.ctlplane_net, self.external_net, self.internal_api_net, - ] - self.network.get_subnet.side_effect = [ - # compute-0 - self.ctlplane_a, self.ctlplane_b, self.int_api_a, self.int_api_b, - # controller-0 - self.ctlplane_a, self.external_a, self.int_api_a, - # controller-1 - self.ctlplane_a, self.external_a, self.int_api_a, - # controller-2 - self.ctlplane_a, self.external_a, self.int_api_a, - ] - - self.extract_file = tempfile.NamedTemporaryFile( - mode='w', delete=False, suffix='.yaml') - self.extract_file.close() - - self.roles_file = tempfile.NamedTemporaryFile( - mode='w', delete=False, suffix='.yaml') - self.roles_file.write(yaml.safe_dump(roles_data)) - self.roles_file.close() - - self.networks_file = tempfile.NamedTemporaryFile( - mode='w', delete=False, suffix='.yaml') - self.networks_file.write(yaml.safe_dump(networks_data)) - self.networks_file.close() - - self.working_dir = tempfile.TemporaryDirectory() - - self.addCleanup(os.unlink, self.extract_file.name) - self.addCleanup(os.unlink, self.roles_file.name) - self.addCleanup(os.unlink, self.networks_file.name) - - def test_extract(self): - stack = mock.Mock() - stack.to_dict.return_value = self.stack_dict - stack.environment.return_value = {} - self.orchestration.stacks.get.return_value = stack - - self.baremetal.node.list.return_value = self.nodes - argslist = ['--output', self.extract_file.name, - '--yes'] - self.app.command_options = argslist 
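-        # '--yes' answers the overwrite-confirmation prompt for the existing
-        # --output file, so take_action never blocks waiting on stdin.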
- verifylist = [('output', self.extract_file.name), - ('yes', True)] - - parsed_args = self.check_parser(self.cmd, - argslist, verifylist) - self.cmd.take_action(parsed_args) - - result = self.cmd.app.stdout.make_string() - self.assertEqual([{ - 'name': 'Compute', - 'count': 1, - 'hostname_format': '%stackname%-novacompute-%index%', - 'defaults': { - 'network_config': {'network_config_update': False, - 'physical_bridge_name': 'br-ex', - 'public_interface_name': 'nic1', - 'template': None}, - 'networks': [{'network': 'ctlplane', - 'vif': True}, - {'network': 'internal_api', - 'subnet': 'internal_api_b'}] - }, - 'instances': [{ - 'hostname': 'overcloud-novacompute-0', - 'name': 'bm-3-uuid', - 'resource_class': 'compute', - }], - }, { - 'name': 'Controller', - 'count': 3, - 'hostname_format': '%stackname%-controller-%index%', - 'defaults': { - 'network_config': {'default_route_network': ['External'], - 'network_config_update': False, - 'networks_skip_config': ['Tenant'], - 'physical_bridge_name': 'br-ex', - 'public_interface_name': 'nic1', - 'template': 'templates/controller.j2'}, - 'networks': [{'network': 'ctlplane', - 'vif': True}, - {'network': 'external', - 'subnet': 'external_a'}, - {'network': 'internal_api', - 'subnet': 'internal_api_a'}] - }, - 'instances': [{ - 'hostname': 'overcloud-controller-0', - 'name': 'bm-0-uuid', - 'resource_class': 'controller', - }, { - 'hostname': 'overcloud-controller-1', - 'name': 'bm-1-uuid', - 'resource_class': 'controller', - }, { - 'hostname': 'overcloud-controller-2', - 'name': 'bm-2-uuid', - }], - }], yaml.safe_load(result)) - - with open(self.extract_file.name) as f: - file_content = f.read() - self.assertEqual(yaml.safe_load(result), yaml.safe_load(file_content)) - self.assertIn('# WARNING: No network config found for role Compute. 
' - 'Please edit the file and set the path to the correct ' - 'network config template.\n', file_content) - - def test_extract_ips_from_pool(self): - stack = mock.Mock() - stack.to_dict.return_value = self.stack_dict - stack.environment.return_value = { - 'parameter_defaults': { - 'ComputeIPs': - self.stack_dict['outputs'][1]['output_value']['Compute'], - 'ControllerIPs': - self.stack_dict['outputs'][1]['output_value']['Controller'] - } - } - self.orchestration.stacks.get.return_value = stack - - self.baremetal.node.list.return_value = self.nodes - argslist = ['--roles-file', self.roles_file.name, - '--networks-file', self.networks_file.name, - '--output', self.extract_file.name, - '--yes'] - self.app.command_options = argslist - verifylist = [('roles_file', self.roles_file.name), - ('networks_file', self.networks_file.name), - ('output', self.extract_file.name), - ('yes', True)] - - parsed_args = self.check_parser(self.cmd, - argslist, verifylist) - self.cmd.take_action(parsed_args) - - result = self.cmd.app.stdout.make_string() - self.assertEqual([{ - 'name': 'Compute', - 'count': 1, - 'hostname_format': '%stackname%-novacompute-%index%', - 'defaults': { - 'network_config': {'network_config_update': False, - 'physical_bridge_name': 'br-ex', - 'public_interface_name': 'nic1', - 'template': None}, - 'networks': [{'network': 'ctlplane', - 'vif': True}, - {'network': 'internal_api', - 'subnet': 'internal_api_b'}] - }, - 'instances': [{ - 'hostname': 'overcloud-novacompute-0', - 'name': 'bm-3-uuid', - 'resource_class': 'compute', - 'networks': [{'fixed_ip': '192.168.26.11', - 'network': 'ctlplane', - 'vif': True}, - {'fixed_ip': '172.17.1.23', - 'network': 'internal_api', - 'subnet': 'internal_api_b'}], - }], - }, { - 'name': 'Controller', - 'count': 3, - 'hostname_format': '%stackname%-controller-%index%', - 'defaults': { - 'network_config': {'default_route_network': ['External'], - 'network_config_update': False, - 'networks_skip_config': ['Tenant'], - 'physical_bridge_name': 'br-ex', - 'public_interface_name': 'nic1', - 'template': 'templates/controller.j2'}, - 'networks': [{'network': 'ctlplane', - 'vif': True}, - {'network': 'external', - 'subnet': 'external_a'}, - {'network': 'internal_api', - 'subnet': 'internal_api_a'}], - }, - 'instances': [{ - 'hostname': 'overcloud-controller-0', - 'name': 'bm-0-uuid', - 'resource_class': 'controller', - 'networks': [{'fixed_ip': '192.168.25.21', - 'network': 'ctlplane', - 'vif': True}, - {'fixed_ip': '10.0.0.199', - 'network': 'external', - 'subnet': 'external_a'}, - {'fixed_ip': '172.17.0.37', - 'network': 'internal_api', - 'subnet': 'internal_api_a'}], - }, { - 'hostname': 'overcloud-controller-1', - 'name': 'bm-1-uuid', - 'resource_class': 'controller', - 'networks': [{'fixed_ip': '192.168.25.25', - 'network': 'ctlplane', - 'vif': True}, - {'fixed_ip': '10.0.0.197', - 'network': 'external', - 'subnet': 'external_a'}, - {'fixed_ip': '172.17.0.33', - 'network': 'internal_api', - 'subnet': 'internal_api_a'}], - }, { - 'hostname': 'overcloud-controller-2', - 'name': 'bm-2-uuid', - 'networks': [{'fixed_ip': '192.168.25.28', - 'network': 'ctlplane', - 'vif': True}, - {'fixed_ip': '10.0.0.191', - 'network': 'external', - 'subnet': 'external_a'}, - {'fixed_ip': '172.17.0.39', - 'network': 'internal_api', - 'subnet': 'internal_api_a'}], - }], - }], yaml.safe_load(result)) - - with open(self.extract_file.name) as f: - self.assertEqual(yaml.safe_load(result), yaml.safe_load(f)) - - @mock.patch('tripleoclient.utils.run_command_and_log', autospec=True) - 
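-    # The test below checks that a legacy Heat NIC config referenced by the
-    # stack environment is handed to the tripleo-heat-templates
-    # convert_heat_nic_config_to_ansible_j2.py tool via run_command_and_log.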
def test_extract_convert_nic_configs(self, mock_run_cmd): - stack = mock.Mock() - stack.stack_name = 'overcloud' - stack.to_dict.return_value = self.stack_dict - stack.files.return_value = { - 'https://1.1.1.1:13808/v1/AUTH_xx/overcloud/user-files/' - 'home/stack/overcloud/compute-net-config.yaml': 'FAKE_CONTENT'} - stack.environment.return_value = { - 'resource_registry': { - 'OS::TripleO::Compute::Net::SoftwareConfig': - 'https://1.1.1.1:13808/v1/AUTH_xx/overcloud/user-files/' - 'home/stack/overcloud/compute-net-config.yaml' - } - } - self.orchestration.stacks.get.return_value = stack - - self.baremetal.node.list.return_value = self.nodes - - mock_run_cmd.return_value = 0 - - argslist = ['--output', self.extract_file.name, - '--working-dir', self.working_dir.name, - '--yes'] - self.app.command_options = argslist - verifylist = [('output', self.extract_file.name), - ('working_dir', self.working_dir.name), - ('yes', True)] - parsed_args = self.check_parser(self.cmd, - argslist, verifylist) - self.cmd.take_action(parsed_args) - - result = self.cmd.app.stdout.make_string() - heat_nic_conf_path = os.path.join(self.working_dir.name, - 'nic-configs', - 'compute-net-config.yaml') - cmd = ['/usr/share/openstack-tripleo-heat-templates/tools/' - 'convert_heat_nic_config_to_ansible_j2.py', - '--yes', - '--stack', stack.stack_name, - '--networks_file', mock.ANY, - heat_nic_conf_path] - mock_run_cmd.assert_called_once_with(mock.ANY, cmd) - self.assertEqual([{ - 'name': 'Compute', - 'count': 1, - 'hostname_format': '%stackname%-novacompute-%index%', - 'defaults': { - 'network_config': {'network_config_update': False, - 'physical_bridge_name': 'br-ex', - 'public_interface_name': 'nic1', - 'template': - os.path.join(self.working_dir.name, - 'nic-configs', - 'compute-net-config.j2')}, - 'networks': [{'network': 'ctlplane', - 'vif': True}, - {'network': 'internal_api', - 'subnet': 'internal_api_b'}] - }, - 'instances': [{ - 'hostname': 'overcloud-novacompute-0', - 'resource_class': 'compute', - 'name': 'bm-3-uuid' - }], - }, { - 'name': 'Controller', - 'count': 3, - 'hostname_format': '%stackname%-controller-%index%', - 'defaults': { - 'network_config': {'default_route_network': ['External'], - 'network_config_update': False, - 'networks_skip_config': ['Tenant'], - 'physical_bridge_name': 'br-ex', - 'public_interface_name': 'nic1', - 'template': 'templates/controller.j2'}, - 'networks': [{'network': 'ctlplane', - 'vif': True}, - {'network': 'external', - 'subnet': 'external_a'}, - {'network': 'internal_api', - 'subnet': 'internal_api_a'}] - }, - 'instances': [{ - 'hostname': 'overcloud-controller-0', - 'resource_class': 'controller', - 'name': 'bm-0-uuid' - }, { - 'hostname': 'overcloud-controller-1', - 'resource_class': 'controller', - 'name': 'bm-1-uuid' - }, { - 'hostname': 'overcloud-controller-2', - 'name': 'bm-2-uuid' - }], - }], yaml.safe_load(result)) - - with open(self.extract_file.name) as f: - file_content = f.read() - self.assertEqual(yaml.safe_load(result), yaml.safe_load(file_content)) - self.assertIn('WARNING: Network config for role Compute was ' - 'automatically converted from Heat template to Ansible ' - 'Jinja2 template. 
Please review the file: {}\n' - .format(os.path.join(self.working_dir.name, - 'nic-configs', - 'compute-net-config.j2')), - file_content) - - def test_extract_empty(self): - stack_dict = { - 'parameters': {}, - 'outputs': [] - } - stack = mock.Mock() - stack.to_dict.return_value = stack_dict - self.orchestration.stacks.get.return_value = stack - - nodes = [] - - self.baremetal.node.list.return_value = nodes - - argslist = ['--roles-file', self.roles_file.name, - '--networks-file', self.networks_file.name] - self.app.command_options = argslist - verifylist = [('roles_file', self.roles_file.name), - ('networks_file', self.networks_file.name)] - - parsed_args = self.check_parser(self.cmd, - argslist, verifylist) - self.cmd.take_action(parsed_args) - result = self.cmd.app.stdout.make_string() - self.assertIsNone(yaml.safe_load(result)) diff --git a/tripleoclient/tests/v1/overcloud_restore/__init__.py b/tripleoclient/tests/v1/overcloud_restore/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/overcloud_restore/test_backup.py b/tripleoclient/tests/v1/overcloud_restore/test_backup.py deleted file mode 100644 index b2ef51f1e..000000000 --- a/tripleoclient/tests/v1/overcloud_restore/test_backup.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
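-#
-# Unit tests for the "openstack overcloud restore" command, which drives
-# the cli-overcloud-restore-node.yaml Ansible playbook.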
-#
-
-from unittest import mock
-
-from osc_lib.tests import utils
-
-from tripleoclient import constants
-from tripleoclient.tests import fakes
-from tripleoclient.v1 import overcloud_restore
-
-
-class TestOvercloudRestore(utils.TestCommand):
-
-    def setUp(self):
-        super(TestOvercloudRestore, self).setUp()
-
-        # Get the command object to test
-        app_args = mock.Mock()
-        app_args.verbose_level = 1
-        self.app.options = fakes.FakeOptions()
-        self.cmd = overcloud_restore.RestoreOvercloud(self.app, app_args)
-        self.inventory = '/tmp/test_inventory.yaml'
-        # Create an empty inventory file for the command to find.
-        open(self.inventory, 'w').close()
-
-    @mock.patch('os.path.isfile')
-    @mock.patch('os.access')
-    @mock.patch('tripleoclient.utils.run_ansible_playbook',
-                autospec=True)
-    def test_overcloud_restore_controller(self,
-                                          mock_playbook,
-                                          mock_access,
-                                          mock_isfile):
-        arglist = [
-            '--stack',
-            'overcloud',
-            '--node-name',
-            'overcloud-controller-0'
-        ]
-        verifylist = []
-        mock_isfile.return_value = True
-        mock_access.return_value = True
-
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        parameter = 'tripleo_backup_and_restore_overcloud_restore_name'
-
-        self.cmd.take_action(parsed_args)
-        mock_playbook.assert_called_once_with(
-            workdir=mock.ANY,
-            playbook='cli-overcloud-restore-node.yaml',
-            inventory=constants.ANSIBLE_INVENTORY.format('overcloud'),
-            tags=None,
-            skip_tags=None,
-            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
-            verbosity=3,
-            extra_vars={
-                parameter: arglist[3]
-            },
-            ssh_user='stack'
-        )
diff --git a/tripleoclient/tests/v1/overcloud_roles/__init__.py b/tripleoclient/tests/v1/overcloud_roles/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tripleoclient/tests/v1/overcloud_roles/test_overcloud_roles.py b/tripleoclient/tests/v1/overcloud_roles/test_overcloud_roles.py
deleted file mode 100644
index bdb9d5bcf..000000000
--- a/tripleoclient/tests/v1/overcloud_roles/test_overcloud_roles.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright 2017 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
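-#
-# Unit tests for the "openstack overcloud roles" commands (list, generate
-# and show), built on the tripleo_common.utils.roles helpers.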
-# - -from unittest import mock - -from tripleo_common.exception import NotFound -from tripleoclient.tests.v1.overcloud_deploy import fakes -from tripleoclient.v1 import overcloud_roles - - -class TestOvercloudRolesListAvailable(fakes.TestDeployOvercloud): - - def setUp(self): - super(TestOvercloudRolesListAvailable, self).setUp() - self.cmd = overcloud_roles.RoleList(self.app, None) - - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - @mock.patch('os.path.realpath') - def test_action(self, realpath_mock, get_roles_mock): - realpath_mock.return_value = '/foo' - get_roles_mock.return_value = ['a', 'b'] - self.cmd._get_roles = get_roles_mock - - arglist = [] - verifylist = [ - ('roles_path', '/usr/share/openstack-tripleo-heat-templates/roles') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - get_roles_mock.assert_called_once_with('/foo') - - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - @mock.patch('os.path.realpath') - def test_action_role_path(self, realpath_mock, get_roles_mock): - realpath_mock.return_value = '/tmp' - get_roles_mock.return_value = ['a', 'b'] - self.cmd._get_roles = get_roles_mock - - arglist = ['--roles-path', '/tmp'] - verifylist = [ - ('roles_path', '/tmp') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - get_roles_mock.assert_called_once_with('/tmp') - - -class TestOvercloudRolesGenerateData(fakes.TestDeployOvercloud): - - def setUp(self): - super(TestOvercloudRolesGenerateData, self).setUp() - self.cmd = overcloud_roles.RolesGenerate(self.app, None) - - @mock.patch( - 'tripleo_common.utils.roles.generate_roles_data_from_directory') - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - @mock.patch('os.path.realpath') - def test_action(self, realpath_mock, get_roles_mock, check_mock, - generate_roles_mock): - realpath_mock.return_value = '/tmp' - get_roles_mock.return_value = ['Controller', 'Compute'] - generate_roles_mock.return_value = 'foo' - capture_mock = mock.MagicMock() - self.cmd._capture_output = capture_mock - stop_capture_mock = mock.MagicMock() - self.cmd._stop_capture_output = stop_capture_mock - - arglist = ['--roles-path', '/tmp', 'Controller', 'Compute'] - verifylist = [ - ('roles_path', '/tmp'), - ('roles', ['Controller', 'Compute']) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - get_roles_mock.assert_called_once_with('/tmp') - check_mock.assert_called_once_with(['Controller', 'Compute'], - ['Controller', 'Compute']) - capture_mock.assert_called_once_with(None) - generate_roles_mock.assert_called_once_with('/tmp', - ['Controller', 'Compute'], - True) - stop_capture_mock.assert_called_once_with(None) - - @mock.patch( - 'tripleo_common.utils.roles.generate_roles_data_from_directory') - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - @mock.patch('os.path.realpath') - def test_action_with_outputfile(self, realpath_mock, get_roles_mock, - check_mock, generate_roles_mock): - realpath_mock.return_value = '/tmp' - get_roles_mock.return_value = ['Controller', 'Compute'] - generate_roles_mock.return_value = 'foo' - capture_mock = mock.MagicMock() - self.cmd._capture_output = capture_mock - stop_capture_mock = mock.MagicMock() - self.cmd._stop_capture_output = 
stop_capture_mock
-
-        arglist = ['--roles-path', '/tmp', '-o', 'foo.yaml',
-                   'Controller', 'Compute']
-        verifylist = [
-            ('output_file', 'foo.yaml'),
-            ('roles_path', '/tmp'),
-            ('roles', ['Controller', 'Compute'])
-        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        self.cmd.take_action(parsed_args)
-        get_roles_mock.assert_called_once_with('/tmp')
-        check_mock.assert_called_once_with(['Controller', 'Compute'],
-                                           ['Controller', 'Compute'])
-        capture_mock.assert_called_once_with('foo.yaml')
-        generate_roles_mock.assert_called_once_with('/tmp',
-                                                    ['Controller', 'Compute'],
-                                                    True)
-        stop_capture_mock.assert_called_once_with('foo.yaml')
-
-    @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory')
-    @mock.patch('os.path.realpath')
-    def test_action_with_invalid_roles(self, realpath_mock, get_roles_mock):
-        realpath_mock.return_value = '/tmp'
-        get_roles_mock.return_value = ['Controller', 'Compute']
-
-        arglist = ['--roles-path', '/tmp', 'Foo', 'Bar']
-        verifylist = [
-            ('roles_path', '/tmp'),
-            ('roles', ['Foo', 'Bar'])
-        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        self.assertRaises(NotFound, self.cmd.take_action, parsed_args)
-        get_roles_mock.assert_called_once_with('/tmp')
-
-
-class TestOvercloudRoleShow(fakes.TestDeployOvercloud):
-
-    def setUp(self):
-        super(TestOvercloudRoleShow, self).setUp()
-        self.cmd = overcloud_roles.RoleShow(self.app, None)
-
-    @mock.patch('yaml.safe_load')
-    @mock.patch('tripleoclient.v1.overcloud_roles.open')
-    @mock.patch('os.path.realpath')
-    def test_action(self, realpath_mock, open_mock, yaml_mock):
-        realpath_mock.return_value = '/tmp'
-        yaml_mock.return_value = [{'name': 'foo', 'Services': ['a', 'b']}]
-
-        arglist = ['--roles-path', '/tmp', 'foo']
-        verifylist = [
-            ('roles_path', '/tmp'),
-            ('role', 'foo')
-        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        self.cmd.take_action(parsed_args)
-        open_mock.assert_called_once_with('/tmp/foo.yaml', 'r')
-
-    @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory')
-    @mock.patch('tripleoclient.v1.overcloud_roles.open')
-    @mock.patch('os.path.realpath')
-    def test_action_invalid_role(self, realpath_mock, open_mock,
-                                 get_roles_mock):
-        realpath_mock.return_value = '/tmp'
-        open_mock.side_effect = IOError('bar')
-        get_roles_mock.return_value = ['Controller', 'Compute']
-
-        arglist = ['--roles-path', '/tmp', 'foo']
-        verifylist = [
-            ('roles_path', '/tmp'),
-            ('role', 'foo')
-        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        self.assertRaises(NotFound, self.cmd.take_action, parsed_args)
-        open_mock.assert_called_once_with('/tmp/foo.yaml', 'r')
diff --git a/tripleoclient/tests/v1/overcloud_update/__init__.py b/tripleoclient/tests/v1/overcloud_update/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tripleoclient/tests/v1/overcloud_update/fakes.py b/tripleoclient/tests/v1/overcloud_update/fakes.py
deleted file mode 100644
index 6424e1996..000000000
--- a/tripleoclient/tests/v1/overcloud_update/fakes.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from tripleoclient.tests import fakes - - -class TestOvercloudUpdatePrepare(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudUpdatePrepare, self).setUp() - - -class TestOvercloudUpdateRun(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudUpdateRun, self).setUp() diff --git a/tripleoclient/tests/v1/overcloud_update/test_overcloud_update.py b/tripleoclient/tests/v1/overcloud_update/test_overcloud_update.py deleted file mode 100644 index 2bce3b552..000000000 --- a/tripleoclient/tests/v1/overcloud_update/test_overcloud_update.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from osc_lib.tests.utils import ParserException -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient.tests.v1.overcloud_update import fakes -from tripleoclient.v1 import overcloud_update - - -class TestOvercloudUpdatePrepare(fakes.TestOvercloudUpdatePrepare): - - def setUp(self): - super(TestOvercloudUpdatePrepare, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_update.UpdatePrepare(self.app, app_args) - - uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4") - self.mock_uuid4 = uuid4_patcher.start() - self.addCleanup(self.mock_uuid4.stop) - - @mock.patch('tripleoclient.utils.ensure_run_as_normal_user') - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - @mock.patch('builtins.open') - @mock.patch('os.path.abspath') - @mock.patch('yaml.safe_load') - @mock.patch('shutil.copytree', autospec=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- 'take_action', autospec=True) - def test_update_failed(self, mock_deploy, mock_copy, mock_yaml, - mock_abspath, mock_open, - mock_confirm, mock_usercheck): - mock_deploy.side_effect = exceptions.DeploymentError() - mock_yaml.return_value = {'fake_container': 'fake_value'} - argslist = ['--stack', 'overcloud', '--templates', ] - verifylist = [ - ('stack', 'overcloud'), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES), - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - - with mock.patch('os.path.exists') as mock_exists, \ - mock.patch('os.path.isfile') as mock_isfile: - mock_exists.return_value = True - mock_isfile.return_value = True - self.assertRaises(exceptions.DeploymentError, - self.cmd.take_action, parsed_args) - mock_usercheck.assert_called_once() - - @mock.patch('tripleoclient.utils.get_ctlplane_attrs', autospec=True, - return_value={}) - @mock.patch('tripleoclient.utils.ensure_run_as_normal_user') - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - @mock.patch('tripleoclient.utils.get_undercloud_host_entry', autospec=True, - return_value='192.168.0.1 uc.ctlplane.localhost uc.ctlplane') - @mock.patch('tripleoclient.v1.overcloud_update.UpdatePrepare.log', - autospec=True) - @mock.patch('os.path.abspath') - @mock.patch('yaml.safe_load') - @mock.patch('shutil.copytree', autospec=True) - @mock.patch('builtins.open') - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'take_action', autospec=True) - def test_update_out(self, mock_deploy, mock_open, mock_copy, mock_yaml, - mock_abspath, mock_logger, - mock_get_undercloud_host_entry, - mock_confirm, mock_usercheck, - mock_get_ctlplane_attrs): - mock_yaml.return_value = {'fake_container': 'fake_value'} - - argslist = ['--stack', 'overcloud', '--templates'] - - verifylist = [ - ('stack', 'overcloud'), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES), - ] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - with mock.patch('os.path.exists') as mock_exists, \ - mock.patch('os.path.isfile') as mock_isfile: - mock_exists.return_value = True - mock_isfile.return_value = True - self.cmd.take_action(parsed_args) - mock_usercheck.assert_called_once() - mock_deploy.assert_called_once() - - -class TestOvercloudUpdateRun(fakes.TestOvercloudUpdateRun): - - def setUp(self): - super(TestOvercloudUpdateRun, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_update.UpdateRun(self.app, app_args) - - uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4") - self.mock_uuid4 = uuid4_patcher.start() - self.addCleanup(self.mock_uuid4.stop) - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_update_with_no_limit( - self, mock_open, mock_execute, mock_expanduser, update_ansible, - mock_confirm): - mock_expanduser.return_value = '/home/fake/' - argslist = [] - verifylist = [ - ('static_inventory', None), - ('playbook', 'all') - ] - self.assertRaises(ParserException, lambda: self.check_parser( - self.cmd, argslist, verifylist)) diff --git a/tripleoclient/tests/v1/overcloud_upgrade/__init__.py b/tripleoclient/tests/v1/overcloud_upgrade/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/tripleoclient/tests/v1/overcloud_upgrade/fakes.py b/tripleoclient/tests/v1/overcloud_upgrade/fakes.py deleted file mode 100644 index 0ec28847d..000000000 --- a/tripleoclient/tests/v1/overcloud_upgrade/fakes.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from tripleoclient.tests import fakes - - -class TestOvercloudUpgradePrepare(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudUpgradePrepare, self).setUp() - - -class TestOvercloudUpgradeRun(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudUpgradeRun, self).setUp() diff --git a/tripleoclient/tests/v1/overcloud_upgrade/test_overcloud_upgrade.py b/tripleoclient/tests/v1/overcloud_upgrade/test_overcloud_upgrade.py deleted file mode 100644 index a160906f7..000000000 --- a/tripleoclient/tests/v1/overcloud_upgrade/test_overcloud_upgrade.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from osc_lib.tests.utils import ParserException -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient.tests.v1.overcloud_upgrade import fakes -from tripleoclient.v1 import overcloud_upgrade - - -class TestOvercloudUpgradePrepare(fakes.TestOvercloudUpgradePrepare): - - def setUp(self): - super(TestOvercloudUpgradePrepare, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_upgrade.UpgradePrepare(self.app, app_args) - - uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4") - self.mock_uuid4 = uuid4_patcher.start() - self.addCleanup(self.mock_uuid4.stop) - - @mock.patch('tripleoclient.utils.ensure_run_as_normal_user') - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' - 'take_action') - @mock.patch('tripleoclient.workflows.deployment.' 
- 'get_hosts_and_enable_ssh_admin', autospec=True) - @mock.patch('tripleoclient.utils.prepend_environment', autospec=True) - @mock.patch('tripleoclient.utils.get_stack') - @mock.patch('tripleoclient.v1.overcloud_upgrade.UpgradePrepare.log', - autospec=True) - @mock.patch('yaml.safe_load') - @mock.patch('builtins.open') - def test_upgrade_out(self, - mock_open, - mock_yaml, - mock_logger, - mock_get_stack, - add_env, - mock_enable_ssh_admin, - mock_overcloud_deploy, - mock_confirm, - mock_usercheck): - - mock_stack = mock.Mock(parameters={'DeployIdentifier': ''}) - mock_get_stack.return_value = mock_stack - mock_stack.stack_name = 'overcloud' - mock_yaml.return_value = {'fake_container': 'fake_value'} - add_env = mock.Mock() - add_env.return_value = True - argslist = ['--stack', 'overcloud', '--templates', - '--overcloud-ssh-enable-timeout', '10', - '--overcloud-ssh-port-timeout', '10'] - verifylist = [ - ('stack', 'overcloud'), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES), - ('overcloud_ssh_enable_timeout', 10), - ('overcloud_ssh_port_timeout', 10), - ] - - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.working_dir = mock.Mock() - self.cmd.take_action(parsed_args) - mock_usercheck.assert_called_once() - - mock_overcloud_deploy.assert_called_once_with(parsed_args) - args, kwargs = mock_overcloud_deploy.call_args - # Check the stack_only arg is set to True - self.assertEqual(args[0].stack_only, True) - mock_enable_ssh_admin.assert_called_once_with( - parsed_args.stack, - parsed_args.overcloud_ssh_network, - parsed_args.overcloud_ssh_user, - mock.ANY, - parsed_args.overcloud_ssh_port_timeout, - self.cmd.working_dir, - mock.ANY, - 'pod' - ) - - @mock.patch('tripleoclient.utils.ensure_run_as_normal_user') - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - @mock.patch('tripleoclient.v1.overcloud_deploy.DeployOvercloud.' 
- 'take_action') - @mock.patch('tripleoclient.utils.get_stack') - @mock.patch('tripleoclient.utils.prepend_environment', autospec=True) - @mock.patch('builtins.open') - @mock.patch('yaml.safe_load') - def test_upgrade_failed(self, mock_yaml, mock_open, - add_env, mock_get_stack, mock_overcloud_deploy, - mock_confirm, mock_usercheck): - mock_overcloud_deploy.side_effect = exceptions.DeploymentError() - mock_yaml.return_value = {'fake_container': 'fake_value'} - mock_stack = mock.Mock(parameters={'DeployIdentifier': ''}) - mock_stack.stack_name = 'overcloud' - mock_get_stack.return_value = mock_stack - add_env = mock.Mock() - add_env.return_value = True - argslist = ['--stack', 'overcloud', '--templates', ] - verifylist = [ - ('stack', 'overcloud'), - ('templates', constants.TRIPLEO_HEAT_TEMPLATES), - ] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - - self.assertRaises(exceptions.DeploymentError, - self.cmd.take_action, parsed_args) - mock_usercheck.assert_called_once() - mock_overcloud_deploy.assert_called_once_with(parsed_args) - - -class TestOvercloudUpgradeRun(fakes.TestOvercloudUpgradeRun): - - def setUp(self): - super(TestOvercloudUpgradeRun, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = overcloud_upgrade.UpgradeRun(self.app, app_args) - - uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4") - self.mock_uuid4 = uuid4_patcher.start() - self.addCleanup(self.mock_uuid4.stop) - - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_upgrade_limit_with_playbook_and_user( - self, mock_open, mock_execute, mock_expanduser): - mock_expanduser.return_value = '/home/fake/' - argslist = ['--limit', 'Compute, Controller', - '--playbook', 'fake-playbook1.yaml', - 'fake-playbook2.yaml', '--ssh-user', 'tripleo-admin'] - verifylist = [ - ('limit', 'Compute, Controller'), - ('static_inventory', None), - ('playbook', ['fake-playbook1.yaml', 'fake-playbook2.yaml']) - ] - - self.check_parser(self.cmd, argslist, verifylist) - - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_upgrade_nodes_with_playbook_no_skip_tags( - self, mock_open, mock_execute, mock_expanduser): - mock_expanduser.return_value = '/home/fake/' - argslist = ['--limit', 'compute-0,compute-1', - '--playbook', 'fake-playbook.yaml', ] - verifylist = [ - ('limit', 'compute-0,compute-1'), - ('static_inventory', None), - ('playbook', ['fake-playbook.yaml']), - ] - - self.check_parser(self.cmd, argslist, verifylist) - - @mock.patch('os.path.expanduser') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch('builtins.open') - def test_upgrade_with_no_limit( - self, mock_open, mock_execute, mock_expanduser): - mock_expanduser.return_value = '/home/fake/' - argslist = [] - verifylist = [] - self.assertRaises(ParserException, lambda: self.check_parser( - self.cmd, argslist, verifylist)) diff --git a/tripleoclient/tests/v1/test_container_image.py b/tripleoclient/tests/v1/test_container_image.py deleted file mode 100644 index 70d742abc..000000000 --- a/tripleoclient/tests/v1/test_container_image.py +++ /dev/null @@ -1,578 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from io import StringIO -import os -import shutil -import tempfile -from urllib import parse -import yaml -from unittest import mock - -from osc_lib import exceptions as oscexc -from tripleo_common.image import kolla_builder -from tripleoclient.tests.v1.test_plugin import TestPluginV1 -from tripleoclient.v1 import container_image - - -class TestContainerImagePush(TestPluginV1): - def setUp(self): - super(TestContainerImagePush, self).setUp() - - lock = mock.patch('tripleo_common.utils.locks.processlock.ProcessLock') - self.mock_lock = lock.start() - self.addCleanup(lock.stop) - - self.cmd = container_image.TripleOContainerImagePush(self.app, None) - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.UploadTask') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action(self, mock_manager, mock_task, mock_get_uc_registry): - arglist = ['docker.io/namespace/foo'] - verifylist = [('image_to_push', 'docker.io/namespace/foo')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - - # mock upload task - mock_uploadtask = mock.Mock() - mock_task.return_value = mock_uploadtask - - # mock add upload task action - mock_add_upload = mock.Mock() - data = [] - mock_add_upload.return_value = data - mock_uploader.add_upload_task = mock_add_upload - - # mock run tasks action - mock_run_tasks = mock.Mock() - mock_uploader.run_tasks = mock_run_tasks - - self.cmd.take_action(parsed_args) - - mock_task.assert_called_once_with( - image_name='namespace/foo', - pull_source='docker.io', - push_destination='uc.ctlplane.somedomain', - append_tag=parsed_args.append_tag, - modify_role=None, - modify_vars=None, - cleanup=False, - multi_arch=parsed_args.multi_arch) - - mock_add_upload.assert_called_once_with(mock_uploadtask) - mock_run_tasks.assert_called_once() - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.UploadTask') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action_local(self, mock_manager, mock_task, - mock_get_uc_registry): - arglist = ['docker.io/namespace/foo', '--local'] - verifylist = [('image_to_push', 'docker.io/namespace/foo'), - ('local', True)] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - 
mock_uploader.authenticate.return_value = mock_session - - # mock upload task - mock_uploadtask = mock.Mock() - mock_task.return_value = mock_uploadtask - - # mock add upload task action - mock_add_upload = mock.Mock() - data = [] - mock_add_upload.return_value = data - mock_uploader.add_upload_task = mock_add_upload - - # mock run tasks action - mock_run_tasks = mock.Mock() - mock_uploader.run_tasks = mock_run_tasks - - self.cmd.take_action(parsed_args) - - mock_task.assert_called_once_with( - image_name='containers-storage:docker.io/namespace/foo', - pull_source=None, - push_destination='uc.ctlplane.somedomain', - append_tag=parsed_args.append_tag, - modify_role=None, - modify_vars=None, - cleanup=False, - multi_arch=parsed_args.multi_arch) - - mock_add_upload.assert_called_once_with(mock_uploadtask) - mock_run_tasks.assert_called_once() - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.UploadTask') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action_local_path(self, mock_manager, mock_task, - mock_get_uc_registry): - arglist = ['containers-storage:docker.io/namespace/foo'] - verifylist = [('image_to_push', - 'containers-storage:docker.io/namespace/foo')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - - # mock upload task - mock_uploadtask = mock.Mock() - mock_task.return_value = mock_uploadtask - - # mock add upload task action - mock_add_upload = mock.Mock() - data = [] - mock_add_upload.return_value = data - mock_uploader.add_upload_task = mock_add_upload - - # mock run tasks action - mock_run_tasks = mock.Mock() - mock_uploader.run_tasks = mock_run_tasks - - self.cmd.take_action(parsed_args) - - mock_task.assert_called_once_with( - image_name='containers-storage:docker.io/namespace/foo', - pull_source=None, - push_destination='uc.ctlplane.somedomain', - append_tag=parsed_args.append_tag, - modify_role=None, - modify_vars=None, - cleanup=False, - multi_arch=parsed_args.multi_arch) - - mock_add_upload.assert_called_once_with(mock_uploadtask) - mock_run_tasks.assert_called_once() - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.UploadTask') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action_oserror(self, mock_manager, mock_task, - mock_get_uc_registry): - arglist = ['docker.io/namespace/foo'] - verifylist = [('image_to_push', 'docker.io/namespace/foo')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - - # mock upload task - mock_uploadtask = mock.Mock() - mock_task.return_value = mock_uploadtask - - # mock add upload task action - mock_add_upload = 
mock.Mock() - data = [] - mock_add_upload.return_value = data - mock_uploader.add_upload_task = mock_add_upload - - # mock run tasks action - mock_run_tasks = mock.Mock() - mock_run_tasks.side_effect = OSError('Fail') - mock_uploader.run_tasks = mock_run_tasks - - self.assertRaises(oscexc.CommandError, - self.cmd.take_action, - parsed_args) - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.UploadTask') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action_all_options(self, mock_manager, mock_task, - mock_get_uc_registry): - arglist = ['--registry-url', '127.0.0.1:8787', - '--append-tag', 'test', - '--source-username', 'sourceuser', - '--source-password', 'sourcepassword', - '--username', 'user', - '--password', 'password', - '--dry-run', - '--multi-arch', - '--cleanup', - 'docker.io/namespace/foo:tag'] - verifylist = [('registry_url', '127.0.0.1:8787'), - ('append_tag', 'test'), - ('username', 'user'), - ('password', 'password'), - ('dry_run', True), - ('multi_arch', True), - ('cleanup', True), - ('image_to_push', 'docker.io/namespace/foo:tag')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - - # mock upload task - mock_uploadtask = mock.Mock() - mock_task.return_value = mock_uploadtask - - # mock add upload task action - mock_add_upload = mock.Mock() - data = [] - mock_add_upload.return_value = data - mock_uploader.add_upload_task = mock_add_upload - - # mock run tasks action - mock_run_tasks = mock.Mock() - mock_uploader.run_tasks = mock_run_tasks - - self.cmd.take_action(parsed_args) - - source_url = parse.urlparse("docker://docker.io/namespace/foo:tag") - registry_url = parse.urlparse("docker://127.0.0.1:8787") - auth_calls = [mock.call(source_url, - parsed_args.source_username, - parsed_args.source_password), - mock.call(registry_url, - parsed_args.username, - parsed_args.password)] - mock_uploader.authenticate.assert_has_calls(auth_calls) - - mock_task.assert_not_called() - mock_add_upload.assert_not_called() - mock_run_tasks.assert_not_called() - - -class TestContainerImageDelete(TestPluginV1): - - def setUp(self): - super(TestContainerImageDelete, self).setUp() - - lock = mock.patch('tripleo_common.utils.locks.processlock.ProcessLock') - self.mock_lock = lock.start() - self.addCleanup(lock.stop) - - self.cmd = container_image.TripleOContainerImageDelete(self.app, None) - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_oserror(self, mock_manager, mock_get_uc_registry): - - arglist = ['-y', 'foo'] - verifylist = [('yes', True), - ('image_to_delete', 'foo')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return url object from uploader._image_to_url - mock_url = mock.Mock() - 
mock_url.geturl.return_value = 'munged-reg-url' - - mock_uploader._image_to_url.return_value = mock_url - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - - mock_uploader.delete.side_effect = OSError('Errno 13') - self.assertRaises(oscexc.CommandError, - self.cmd.take_action, - parsed_args) - mock_uploader.delete.assert_called_once_with('foo', - session=mock_session) - - -class TestContainerImageList(TestPluginV1): - - def setUp(self): - super(TestContainerImageList, self).setUp() - - lock = mock.patch('tripleo_common.utils.locks.processlock.ProcessLock') - self.mock_lock = lock.start() - self.addCleanup(lock.stop) - - self.cmd = container_image.TripleOContainerImageList(self.app, None) - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action(self, mock_manager, mock_get_uc_registry): - arglist = [] - verifylist = [] - - mock_manager.return_value.uploader.return_value.list.return_value = \ - ['a', 'b'] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - rv = self.cmd.take_action(parsed_args) - actual = (('Image Name',), [('a',), ('b',)]) - self.assertEqual(actual, rv) - - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry', - return_value='uc.ctlplane.somedomain') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action_auth(self, mock_manager, mock_get_uc_registry): - # check arg parsing items - arglist = ['--registry-url', 'reg-url', - '--username', 'foo', - '--password', 'bar'] - verifylist = [('registry_url', 'reg-url'), - ('username', 'foo'), - ('password', 'bar')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return url object from uploader._image_to_url - mock_url = mock.Mock() - mock_url.geturl.return_value = 'munged-reg-url' - - mock_uploader._image_to_url.return_value = mock_url - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - - # mock image list function - mock_uploader.list.return_value = ['a', 'b'] - - rv = self.cmd.take_action(parsed_args) - - # check various functions are called with expected inputs - mock_mgr.uploader.assert_called_with('python') - mock_uploader._image_to_url.assert_called_with('reg-url') - mock_uploader.authenticate.assert_called_with(mock_url, 'foo', 'bar') - mock_uploader.list.assert_called_with('munged-reg-url', - session=mock_session) - - # check data format for lister - actual = (('Image Name',), [('a',), ('b',)]) - self.assertEqual(actual, rv) - - -class TestContainerImageShow(TestPluginV1): - - def setUp(self): - super(TestContainerImageShow, self).setUp() - - lock = mock.patch('tripleo_common.utils.locks.processlock.ProcessLock') - self.mock_lock = lock.start() - self.addCleanup(lock.stop) - - self.cmd = container_image.TripleOContainerImageShow(self.app, None) - - @mock.patch('tripleoclient.v1.container_image.TripleOContainerImageShow.' 
- 'format_image_inspect') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager') - def test_take_action(self, mock_manager, mock_formatter): - - arglist = ['foo'] - verifylist = [('image_to_inspect', 'foo')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # mock manager object - mock_mgr = mock.Mock() - mock_manager.return_value = mock_mgr - - # mock uploader object - mock_uploader = mock.Mock() - mock_mgr.uploader.return_value = mock_uploader - - # mock return url object from uploader._image_to_url - mock_url = mock.Mock() - mock_url.geturl.return_value = 'munged-reg-url' - - mock_uploader._image_to_url.return_value = mock_url - - # mock return session object from uploader.authenticate - mock_session = mock.Mock() - mock_uploader.authenticate.return_value = mock_session - mock_inspect = mock.Mock() - data = {'Name': 'a', 'Layers': 'b'} - mock_inspect.return_value = data - mock_uploader.inspect = mock_inspect - - # mock format image inspect - formatted_data = (['Name', 'Layers'], ['a', 'b']) - mock_formatter.return_value = formatted_data - - rv = self.cmd.take_action(parsed_args) - - mock_formatter.assert_called_once_with(data) - self.assertEqual(formatted_data, rv) - - def test_format_image_inspect(self): - test_data = {'Name': 'foo', 'Layers': 'bar'} - self.assertEqual(self.cmd.format_image_inspect(test_data), - (['Name', 'Layers'], ['foo', 'bar'])) - - -class TestTripleoImagePrepareDefault(TestPluginV1): - - def setUp(self): - super(TestTripleoImagePrepareDefault, self).setUp() - # Get the command object to test - self.cmd = container_image.TripleOImagePrepareDefault(self.app, None) - - def test_prepare_default(self): - arglist = [] - verifylist = [] - - self.app.command_options = [ - 'tripleo', 'container', 'image', 'prepare', 'default' - ] + arglist - self.cmd.app.stdout = StringIO() - cmd = container_image.TripleOImagePrepareDefault(self.app, None) - - parsed_args = self.check_parser(cmd, arglist, verifylist) - cmd.take_action(parsed_args) - - result = self.app.stdout.getvalue() - expected_param = kolla_builder.CONTAINER_IMAGE_PREPARE_PARAM - expected = { - 'parameter_defaults': { - 'ContainerImagePrepare': expected_param - } - } - self.assertEqual(expected, yaml.safe_load(result)) - - def test_prepare_default_no_env(self): - arglist = [] - verifylist = [] - - self.app.command_options = [ - 'tripleo', 'container', 'image', 'prepare' - ] + arglist - self.cmd.app.stdout = StringIO() - cmd = container_image.TripleOImagePrepareDefault(self.app, None) - - parsed_args = self.check_parser(cmd, arglist, verifylist) - cmd.take_action(parsed_args) - - result = self.app.stdout.getvalue() - expected_param = kolla_builder.CONTAINER_IMAGE_PREPARE_PARAM - expected = { - 'parameter_defaults': { - 'ContainerImagePrepare': expected_param - } - } - self.assertEqual(expected, yaml.safe_load(result)) - - def test_prepare_default_local_registry(self): - temp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, temp) - env_file = os.path.join(temp, 'containers_env.yaml') - - arglist = ['--local-push-destination', '--output-env-file', env_file] - verifylist = [] - - self.app.command_options = [ - 'tripleo', 'container', 'image', 'prepare', 'default' - ] + arglist - cmd = container_image.TripleOImagePrepareDefault(self.app, None) - parsed_args = self.check_parser(cmd, arglist, verifylist) - - cmd.take_action(parsed_args) - - with open(env_file) as f: - result = yaml.safe_load(f) - self.assertEqual( - True, - result['parameter_defaults']['ContainerImagePrepare'] - 
[0]['push_destination'] - ) - - def test_prepare_default_registry_login(self): - temp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, temp) - env_file = os.path.join(temp, 'containers_env.yaml') - - arglist = ['--enable-registry-login', '--output-env-file', env_file] - verifylist = [] - - self.app.command_options = [ - 'tripleo', 'container', 'image', 'prepare', 'default' - ] + arglist - cmd = container_image.TripleOImagePrepareDefault(self.app, None) - parsed_args = self.check_parser(cmd, arglist, verifylist) - - cmd.take_action(parsed_args) - - with open(env_file) as f: - result = yaml.safe_load(f) - self.assertEqual( - True, - result['parameter_defaults']['ContainerImageRegistryLogin'] - ) diff --git a/tripleoclient/tests/v1/test_overcloud_admin.py b/tripleoclient/tests/v1/test_overcloud_admin.py deleted file mode 100644 index cc146a066..000000000 --- a/tripleoclient/tests/v1/test_overcloud_admin.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from tripleoclient.tests.v1 import test_plugin -from tripleoclient.v1 import overcloud_admin - - -class TestAdminAuthorize(test_plugin.TestPluginV1): - def setUp(self): - super(TestAdminAuthorize, self).setUp() - self.cmd = overcloud_admin.Authorize(self.app, None) - self.app.client_manager = mock.Mock() - - @mock.patch('tripleoclient.utils.parse_ansible_inventory', - autospec=True) - @mock.patch('tripleoclient.utils.get_key') - @mock.patch('tripleoclient.utils.get_default_working_dir') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_admin_authorize(self, - mock_playbook, - mock_dir, - mock_key, - mock_inventory): - arglist = ['--limit', 'overcloud'] - verifylist = [('limit_hosts', 'overcloud')] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - mock_dir.return_value = "/home/stack/overcloud-deploy" - ansible_dir = "{}/config-download/overcloud".format( - mock_dir.return_value - ) - inventory = "{}/tripleo-ansible-inventory.yaml".format( - ansible_dir - ) - - mock_key.return_value = '/home/stack/.ssh/id_rsa_tripleo' - mock_inventory.return_value = ['overcloud-novacompute-0', - 'overcloud-dellcompute-0', - 'overcloud-controller-0'] - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - playbook='cli-enable-ssh-admin.yaml', - inventory=inventory, - workdir=ansible_dir, - key=parsed_args.overcloud_ssh_key, - playbook_dir='/usr/share/ansible/tripleo-playbooks', - ssh_user=parsed_args.overcloud_ssh_user, - extra_vars={ - "ANSIBLE_PRIVATE_KEY_FILE": '/home/stack/.ssh/id_rsa_tripleo', - "ssh_servers": ['overcloud-novacompute-0', - 'overcloud-dellcompute-0', - 'overcloud-controller-0'] - }, - limit_hosts='localhost,overcloud', - ansible_timeout=parsed_args.overcloud_ssh_port_timeout - ) diff --git a/tripleoclient/tests/v1/test_overcloud_bios.py b/tripleoclient/tests/v1/test_overcloud_bios.py deleted file mode 100644 index 4054c2ae0..000000000 --- a/tripleoclient/tests/v1/test_overcloud_bios.py +++ /dev/null @@ 
-1,172 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import tempfile -from unittest import mock - -from osc_lib.tests import utils as test_utils - -from tripleoclient.tests import fakes as ooofakes -from tripleoclient.tests.v1.baremetal import fakes -from tripleoclient.v1 import overcloud_bios - - -class Base(fakes.TestBaremetal): - def setUp(self): - super(Base, self).setUp() - self.conf = { - "settings": [ - {"name": "virtualization", "value": "on"}, - {"name": "hyperthreading", "value": "on"} - ] - } - self.app.client_manager.baremetal.node.list.return_value = [] - - -class TestConfigureBIOS(Base): - - def setUp(self): - super(TestConfigureBIOS, self).setUp() - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = ooofakes.FakeOptions() - self.cmd = overcloud_bios.ConfigureBIOS(self.app, app_args) - playbook_runner = mock.patch( - 'tripleoclient.utils.run_ansible_playbook', - autospec=True - ) - playbook_runner.start() - - def test_configure_specified_nodes_ok(self): - conf = json.dumps(self.conf) - arglist = ['--configuration', conf, 'node_uuid1', 'node_uuid2'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - def test_configure_specified_nodes_and_configuration_from_file(self): - with tempfile.NamedTemporaryFile('w+t') as fp: - json.dump(self.conf, fp) - fp.flush() - arglist = ['--configuration', fp.name, 'node_uuid1', 'node_uuid2'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('configuration', fp.name) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - def test_configure_specified_nodes_and_configuration_not_yaml(self): - arglist = ['--configuration', ':', 'node_uuid1', 'node_uuid2'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('configuration', ':') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaisesRegex(RuntimeError, 'cannot be parsed as YAML', - self.cmd.take_action, parsed_args) - - def test_configure_specified_nodes_and_configuration_bad_type(self): - for conf in ('[]', '{"settings": 42}', '{settings: [42]}'): - arglist = ['--configuration', conf, 'node_uuid1', 'node_uuid2'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(TypeError, self.cmd.take_action, parsed_args) - - def test_configure_specified_nodes_and_configuration_bad_value(self): - conf = '{"another_key": [{}]}' - arglist = ['--configuration', conf, 'node_uuid1', 'node_uuid2'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(ValueError, self.cmd.take_action, parsed_args) - - def test_configure_uuids_and_all_both_specified(self): 
- conf = json.dumps(self.conf) - arglist = ['--configuration', conf, 'node_uuid1', 'node_uuid2', - '--all-manageable'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('configuration', conf), - ('all-manageable', True) - ] - self.assertRaises(test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - def test_configure_all_manageable_nodes_ok(self): - conf = json.dumps(self.conf) - arglist = ['--configuration', conf, '--all-manageable'] - verifylist = [ - ('all_manageable', True), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestResetBIOS(Base): - - def setUp(self): - super(TestResetBIOS, self).setUp() - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = ooofakes.FakeOptions() - self.cmd = overcloud_bios.ResetBIOS(self.app, app_args) - playbook_runner = mock.patch( - 'tripleoclient.utils.run_ansible_playbook', - autospec=True - ) - playbook_runner.start() - - def test_reset_specified_nodes_ok(self): - arglist = ['node_uuid1', 'node_uuid2'] - verifylist = [('node_uuids', ['node_uuid1', 'node_uuid2'])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - def test_reset_all_manageable_nodes_ok(self): - arglist = ['--all-manageable'] - verifylist = [('all_manageable', True)] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - def test_reset_no_nodes(self): - arglist = [] - verifylist = [] - self.assertRaises(test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - def test_reset_uuids_and_all_both_specified(self): - arglist = ['node_uuid1', 'node_uuid2', '--all-manageable'] - verifylist = [ - ('node_uuids', ['node_uuid1', 'node_uuid2']), - ('all-manageable', True) - ] - self.assertRaises(test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) diff --git a/tripleoclient/tests/v1/test_overcloud_cell.py b/tripleoclient/tests/v1/test_overcloud_cell.py deleted file mode 100644 index dda6c3fda..000000000 --- a/tripleoclient/tests/v1/test_overcloud_cell.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
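A convention worth noting before the cell-export tests that follow: with stacked @mock.patch decorators, the bottom-most decorator is applied first and therefore supplies the first mock argument, so the test's parameter list reads bottom-up relative to the decorators. A minimal, self-contained sketch of the rule (not part of the deleted files; the patched targets are arbitrary stdlib functions):

import unittest
from unittest import mock


class DecoratorOrderExample(unittest.TestCase):

    # Decorators apply bottom-up: os.path.exists is patched first and
    # becomes the first injected argument; os.path.realpath is patched
    # last and becomes the second.
    @mock.patch('os.path.realpath')
    @mock.patch('os.path.exists')
    def test_argument_order(self, mock_exists, mock_realpath):
        mock_exists.return_value = True
        mock_realpath.return_value = '/tmp'
        self.assertTrue(mock_exists('/any/path'))
        self.assertEqual('/tmp', mock_realpath('/any/path'))


if __name__ == '__main__':
    unittest.main()

This is why test_export_cell_defaults below receives mock_path_exists first even though os.path.exists is the last decorator in its stack.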
- -import os -from unittest import mock -from osc_lib.tests import utils - -from tripleoclient.v1 import overcloud_cell -from tripleoclient.exceptions import CellExportError -from tripleoclient import utils as oooutils - - -class TestExportCell(utils.TestCommand): - def setUp(self): - super(TestExportCell, self).setUp() - self.cmd = overcloud_cell.ExportCell(self.app, None) - self.app.client_manager.orchestration = mock.Mock() - - @mock.patch('tripleoclient.v1.overcloud_cell.yaml.safe_dump') - @mock.patch('tripleoclient.v1.overcloud_cell.print') - @mock.patch('tripleoclient.v1.overcloud_cell.open') - @mock.patch('tripleoclient.v1.overcloud_cell.export.export_stack') - @mock.patch( - 'tripleoclient.v1.overcloud_cell.export.export_passwords', - autospec=True) - @mock.patch( - 'tripleoclient.v1.overcloud_cell.os.path.exists', - autospec=True, return_value=False) - def test_export_cell_defaults(self, mock_path_exists, - mock_export_passwords, mock_export_stack, - mock_open, mock_print, mock_yaml_dump): - """Test class methods with all default parameters. - The test approximates the behavior of the CLI under the assumption that - no alternative values are provided and no exceptions are raised. - """ - argslist = [] - verifylist = [] - - mock_export_passwords.return_value = {'foo': 'bar'} - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - mock_path_exists.assert_any_call('overcloud-cell-export.yaml') - mock_export_passwords.assert_called_once_with( - oooutils.get_default_working_dir('overcloud'), - 'overcloud') - mock_export_stack.assert_called_once_with( - oooutils.get_default_working_dir('overcloud'), - 'overcloud', - True, - os.path.join( - os.environ.get('HOME'), - 'overcloud-deploy', - 'overcloud', - 'config-download')) - mock_print.assert_called() - mock_open.assert_called_once_with('overcloud-cell-export.yaml', 'w') - mock_yaml_dump.assert_called_once() - - @mock.patch('tripleoclient.v1.overcloud_cell.yaml.safe_dump') - @mock.patch('tripleoclient.v1.overcloud_cell.print') - @mock.patch('tripleoclient.v1.overcloud_cell.open') - @mock.patch('tripleoclient.v1.overcloud_cell.export.export_stack') - @mock.patch( - 'tripleoclient.v1.overcloud_cell.export.export_passwords', - autospec=True) - @mock.patch( - 'tripleoclient.v1.overcloud_cell.os.path.exists', - autospec=True, return_value=False) - def test_export_cell_stack_config_dir(self, mock_path_exists, - mock_export_passwords, - mock_export_stack, - mock_open, - mock_print, mock_yaml_dump): - """Test class methods with alternative 'cell_stack' - and 'config_download_dir' argument values. - - The test approximates CLI behavior with no exceptions raised. 
- """ - argslist = ['--cell-stack', 'fizz', '--config-download-dir', 'buzz'] - verifylist = [('cell_stack', 'fizz'), ('config_download_dir', 'buzz')] - - mock_export_passwords._return_value = {'foo': 'bar'} - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.cmd.take_action(parsed_args) - mock_path_exists.assert_any_call('fizz-cell-export.yaml') - mock_export_passwords.assert_called_once_with( - oooutils.get_default_working_dir('fizz'), - 'fizz') - mock_export_stack.assert_called_once_with( - oooutils.get_default_working_dir('fizz'), - 'fizz', - False, - 'buzz') - mock_print.assert_called() - mock_open.assert_called_once_with('fizz-cell-export.yaml', 'w') - mock_yaml_dump.assert_called_once() - - @mock.patch( - 'tripleoclient.v1.overcloud_cell.os.path.exists', - return_value=True) - def test_cell_exception(self, mock_exists): - """Test exception triggering behavior of the 'take_action' method. - If the output file exists and the 'forced-overwrite' flag isn't set, - the method must raise CellExportError to notify the operator. - """ - argslist = [] - verifylist = [] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - self.assertRaises(CellExportError, self.cmd.take_action, parsed_args) - mock_exists.assert_any_call('overcloud-cell-export.yaml') diff --git a/tripleoclient/tests/v1/test_overcloud_export.py b/tripleoclient/tests/v1/test_overcloud_export.py deleted file mode 100644 index 232fa28ab..000000000 --- a/tripleoclient/tests/v1/test_overcloud_export.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import os - -from unittest import mock - -from osc_lib.tests import utils - -from tripleoclient import utils as ooo_utils -from tripleoclient.v1 import overcloud_export - - -class TestOvercloudExport(utils.TestCommand): - - def setUp(self): - super(TestOvercloudExport, self).setUp() - - self.cmd = overcloud_export.ExportOvercloud(self.app, None) - self.app.client_manager.orchestration = mock.Mock() - self.tripleoclient = mock.Mock() - self.app.client_manager.tripleoclient = self.tripleoclient - self.mock_open = mock.mock_open() - - @mock.patch('os.path.exists') - @mock.patch('yaml.safe_dump') - @mock.patch('tripleoclient.export.export_stack') - @mock.patch('tripleoclient.export.export_passwords') - def test_export(self, mock_export_passwords, - mock_export_stack, - mock_safe_dump, - mock_exists): - argslist = [] - verifylist = [] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - mock_exists.return_value = False - mock_export_passwords.return_value = {'key': 'value'} - mock_export_stack.return_value = {'key0': 'value0'} - with mock.patch('builtins.open', self.mock_open): - self.cmd.take_action(parsed_args) - mock_export_passwords.assert_called_once_with( - ooo_utils.get_default_working_dir('overcloud'), - 'overcloud', True) - path = os.path.join(os.environ.get('HOME'), - 'overcloud-deploy', - 'overcloud', - 'config-download') - mock_export_stack.assert_called_once_with( - ooo_utils.get_default_working_dir('overcloud'), - 'overcloud', - False, - path) - self.assertEqual( - {'parameter_defaults': {'AddVipsToEtcHosts': False, - 'key': 'value', - 'key0': 'value0'}}, - mock_safe_dump.call_args[0][0]) - - @mock.patch('os.path.exists') - @mock.patch('yaml.safe_dump') - @mock.patch('tripleoclient.export.export_stack') - @mock.patch('tripleoclient.export.export_passwords') - def test_export_stack_name(self, mock_export_passwords, - mock_export_stack, - mock_safe_dump, - mock_exists): - argslist = ['--stack', 'foo'] - verifylist = [('stack', 'foo')] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - mock_exists.return_value = False - with mock.patch('builtins.open', self.mock_open): - self.cmd.take_action(parsed_args) - mock_export_passwords.assert_called_once_with( - ooo_utils.get_default_working_dir('foo'), - 'foo', True) - path = os.path.join(os.environ.get('HOME'), - 'overcloud-deploy', - 'foo', - 'config-download') - mock_export_stack.assert_called_once_with( - ooo_utils.get_default_working_dir('foo'), - 'foo', - False, - path) - - @mock.patch('os.path.exists') - @mock.patch('yaml.safe_dump') - @mock.patch('tripleoclient.export.export_stack') - @mock.patch('tripleoclient.export.export_passwords') - def test_export_stack_name_and_dir(self, mock_export_passwords, - mock_export_stack, - mock_safe_dump, mock_exists): - argslist = ['--stack', 'foo', - '--config-download-dir', '/tmp/bar'] - verifylist = [('stack', 'foo'), - ('config_download_dir', '/tmp/bar')] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - mock_exists.return_value = False - with mock.patch('builtins.open', self.mock_open): - self.cmd.take_action(parsed_args) - mock_export_passwords.assert_called_once_with( - ooo_utils.get_default_working_dir('foo'), - 'foo', True) - mock_export_stack.assert_called_once_with( - ooo_utils.get_default_working_dir('foo'), - 'foo', - False, - '/tmp/bar') - - @mock.patch('os.path.exists') - @mock.patch('yaml.safe_dump') - @mock.patch('tripleoclient.export.export_stack') - @mock.patch('tripleoclient.export.export_passwords') - def 
test_export_no_excludes(self, mock_export_passwords, - mock_export_stack, - mock_safe_dump, mock_exists): - argslist = ['--stack', 'foo', - '--config-download-dir', '/tmp/bar', - '--no-password-excludes'] - verifylist = [('stack', 'foo'), - ('config_download_dir', '/tmp/bar'), - ('no_password_excludes', True)] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - mock_exists.return_value = False - with mock.patch('builtins.open', self.mock_open): - self.cmd.take_action(parsed_args) - mock_export_passwords.assert_called_once_with( - ooo_utils.get_default_working_dir('foo'), - 'foo', False) - mock_export_stack.assert_called_once_with( - ooo_utils.get_default_working_dir('foo'), - 'foo', - False, - '/tmp/bar') - - @mock.patch('tripleo_common.utils.plan.generate_passwords') - @mock.patch('shutil.copy') - @mock.patch('os.path.exists') - @mock.patch('tripleoclient.utils.get_default_working_dir') - def test_export_ephemeral_heat(self, mock_working_dir, mock_exists, - mock_copy, mock_passwords): - argslist = ['--force-overwrite'] - verifylist = [('force_overwrite', True)] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - mock_exists.return_value = True - mock_working_dir.return_value = 'wd' - mock_open = mock.mock_open(read_data='{}') - mock_passwords.return_value = dict() - with mock.patch('builtins.open', mock_open): - self.cmd.take_action(parsed_args) - mock_working_dir.assert_called() - mock_passwords.assert_called() diff --git a/tripleoclient/tests/v1/test_overcloud_export_ceph.py b/tripleoclient/tests/v1/test_overcloud_export_ceph.py deleted file mode 100644 index 94131635d..000000000 --- a/tripleoclient/tests/v1/test_overcloud_export_ceph.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
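Several of these tests inspect mock_safe_dump.call_args[0][0] to examine the exact document handed to yaml.safe_dump. A standalone sketch of the call_args structure (illustrative names only, not tripleoclient code):

from unittest import mock

stub = mock.Mock()
stub({'parameter_defaults': {'key': 'value'}}, default_flow_style=False)

# call_args is an (args, kwargs) pair: index [0] is the tuple of
# positional arguments, so [0][0] is the first positional argument.
args, kwargs = stub.call_args
assert args[0] == {'parameter_defaults': {'key': 'value'}}
assert kwargs == {'default_flow_style': False}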
-import os - -from unittest import mock - -from osc_lib.tests import utils - -from tripleoclient.v1 import overcloud_export_ceph - - -class TestOvercloudExportCeph(utils.TestCommand): - - def setUp(self): - super(TestOvercloudExportCeph, self).setUp() - - self.cmd = overcloud_export_ceph.ExportOvercloudCeph(self.app, None) - self.tripleoclient = mock.Mock() - self.app.client_manager.tripleoclient = self.tripleoclient - self.mock_open = mock.mock_open() - - @mock.patch('os.path.exists') - @mock.patch('yaml.safe_dump') - @mock.patch('tripleoclient.export.export_ceph') - def test_export_ceph(self, mock_export_ceph, - mock_safe_dump, - mock_exists): - argslist = ['--stack', 'dcn0'] - verifylist = [('stack', 'dcn0')] - parsed_args = self.check_parser(self.cmd, argslist, verifylist) - mock_exists.return_value = False - expected = { - 'external_cluster_mon_ips': '192.168.24.42', - 'keys': [ - {'name': 'client.openstack'} - ], - 'ceph_conf_overrides': { - 'client': { - 'keyring': '/etc/ceph/dcn0.client.openstack.keyring' - } - }, - 'cluster': 'dcn0', - 'fsid': 'a5a22d37-e01f-4fa0-a440-c72585c7487f', - 'dashboard_enabled': False - } - data = {} - data['parameter_defaults'] = {} - data['parameter_defaults']['CephExternalMultiConfig'] = [expected] - mock_export_ceph.return_value = expected - - with mock.patch('builtins.open', self.mock_open): - self.cmd.take_action(parsed_args) - path = os.path.join(os.environ.get('HOME'), - 'overcloud-deploy', 'dcn0', 'config-download') - mock_export_ceph.assert_called_once_with('dcn0', 'openstack', path) - self.assertEqual(data, mock_safe_dump.call_args[0][0]) diff --git a/tripleoclient/tests/v1/test_overcloud_parameters.py b/tripleoclient/tests/v1/test_overcloud_parameters.py deleted file mode 100644 index 8d1ae1b1a..000000000 --- a/tripleoclient/tests/v1/test_overcloud_parameters.py +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
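The failure paths in these tests (OSError from run_tasks, DeploymentError from take_action, IOError from open) are all driven by the side_effect attribute. A minimal sketch of the mechanism, independent of the deleted code:

import unittest
from unittest import mock


class SideEffectExample(unittest.TestCase):

    def test_exception_side_effect_propagates(self):
        # Assigning an exception instance to side_effect makes every
        # call raise it, which lets a test drive the error path and
        # assert on the resulting exception.
        runner = mock.Mock()
        runner.run_tasks.side_effect = OSError('Fail')
        self.assertRaises(OSError, runner.run_tasks)


if __name__ == '__main__':
    unittest.main()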
- -from unittest import mock - -from osc_lib.tests import utils - -from tripleoclient.v1 import overcloud_parameters - - -class TestGenerateFencingParameters(utils.TestCommand): - - def setUp(self): - super(TestGenerateFencingParameters, self).setUp() - - self.cmd = overcloud_parameters.GenerateFencingParameters(self.app, - None) - self.app.client_manager = mock.Mock() - - @mock.patch( - 'tripleoclient.workflows.parameters.generate_fencing_parameters', - autospec=True) - def test_generate_parameters(self, mock_gen_fence): - mock_open_context = mock.mock_open(read_data=""" -{ - "nodes": [ - { - "name": "control-0", - "pm_password": "control-0-password", - "pm_type": "ipmi", - "pm_user": "control-0-admin", - "pm_addr": "0.1.2.3", - "pm_port": "0123", - "mac": [ - "00:11:22:33:44:55" - ] - }, - { - "name": "control-1", - "pm_password": "control-1-password", - "pm_type": "ipmi", - "pm_user": "control-1-admin", - "pm_addr": "1.2.3.4", - "mac": [ - "11:22:33:44:55:66" - ] - } - ] -} - """) - - arglist = ['node_file.json'] - verifylist = [] - - mock_gen_fence.return_value = '{"result":[]}' - - with mock.patch('builtins.open', mock_open_context): - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_gen_fence.assert_called_once_with( - **{ - 'nodes_json': [ - { - u'mac': [u'00:11:22:33:44:55'], - u'name': u'control-0', - u'pm_port': u'0123', - u'pm_addr': u'0.1.2.3', - u'pm_type': u'ipmi', - u'pm_password': u'control-0-password', - u'pm_user': u'control-0-admin' - }, - { - u'name': u'control-1', - u'pm_addr': u'1.2.3.4', - u'pm_type': u'ipmi', - u'pm_user': u'control-1-admin', - u'pm_password': u'control-1-password', - u'mac': [u'11:22:33:44:55:66'] - }], - 'delay': None, - 'ipmi_cipher': None, - 'ipmi_lanplus': True, - 'ipmi_level': None - }) diff --git a/tripleoclient/tests/v1/test_overcloud_profiles.py b/tripleoclient/tests/v1/test_overcloud_profiles.py deleted file mode 100644 index 740157e51..000000000 --- a/tripleoclient/tests/v1/test_overcloud_profiles.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
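Both the fencing-parameters test above and the profile tests below pass autospec=True to mock.patch. A small sketch of what autospec buys (the patched function here is a stand-in defined locally, not tripleoclient code): the mock copies the real signature, so mismatched calls fail loudly instead of being silently recorded.

import unittest
from unittest import mock


def compute_fencing(nodes_json, delay=None):
    # Stand-in for a real helper; replaced by the autospec mock below.
    raise NotImplementedError


class AutospecExample(unittest.TestCase):

    @mock.patch('%s.compute_fencing' % __name__, autospec=True)
    def test_signature_enforced(self, mock_compute):
        compute_fencing(nodes_json=[], delay=None)
        mock_compute.assert_called_once_with(nodes_json=[], delay=None)
        # An argument the real function does not accept raises
        # TypeError rather than passing unnoticed.
        self.assertRaises(TypeError, compute_fencing, [], None, 'extra')


if __name__ == '__main__':
    unittest.main()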
- -from unittest import mock - -from tripleoclient import exceptions -from tripleoclient.tests import fakes -from tripleoclient.tests.v1 import test_plugin -from tripleoclient import utils -from tripleoclient.v1 import overcloud_profiles - - -@mock.patch.object(utils, 'assign_and_verify_profiles', autospec=True) -class TestMatchProfiles(test_plugin.TestPluginV1): - def setUp(self): - super(TestMatchProfiles, self).setUp() - self.cmd = overcloud_profiles.MatchProfiles(self.app, None) - self.app.client_manager.tripleoclient = mock.Mock() - self.app.client_manager.baremetal = mock.Mock() - self.app.client_manager.compute = mock.Mock() - self.flavors = [ - fakes.FakeFlavor('compute'), - fakes.FakeFlavor('control'), - ] - self.app.client_manager.compute.flavors.list.return_value = ( - self.flavors) - - def test_ok(self, mock_assign): - mock_assign.return_value = (0, 0) - - arglist = [ - '--compute-flavor', 'compute', - '--compute-scale', '3', - '--control-flavor', 'control', - '--control-scale', '1', - ] - parsed_args = self.check_parser(self.cmd, arglist, []) - - self.cmd.take_action(parsed_args) - - mock_assign.assert_called_once_with( - self.app.client_manager.baremetal, - {'compute': (self.flavors[0], 3), - 'control': (self.flavors[1], 1)}, - assign_profiles=True, dry_run=False) - - def test_failed(self, mock_assign): - mock_assign.return_value = (2, 0) - - arglist = [ - '--compute-flavor', 'compute', - '--compute-scale', '3', - '--control-flavor', 'control', - '--control-scale', '1', - ] - parsed_args = self.check_parser(self.cmd, arglist, []) - - self.assertRaises(exceptions.ProfileMatchingError, - self.cmd.take_action, parsed_args) - - mock_assign.assert_called_once_with( - self.app.client_manager.baremetal, - {'compute': (self.flavors[0], 3), - 'control': (self.flavors[1], 1)}, - assign_profiles=True, dry_run=False) - - def test_dry_run(self, mock_assign): - mock_assign.return_value = (0, 0) - - arglist = [ - '--compute-flavor', 'compute', - '--compute-scale', '3', - '--control-flavor', 'control', - '--control-scale', '1', - '--dry-run' - ] - parsed_args = self.check_parser(self.cmd, arglist, []) - - self.cmd.take_action(parsed_args) - - mock_assign.assert_called_once_with( - self.app.client_manager.baremetal, - {'compute': (self.flavors[0], 3), - 'control': (self.flavors[1], 1)}, - assign_profiles=True, dry_run=True) - - -class TestListProfiles(test_plugin.TestPluginV1): - def setUp(self): - super(TestListProfiles, self).setUp() - self.cmd = overcloud_profiles.ListProfiles(self.app, None) - self.app.client_manager.tripleoclient = mock.Mock() - self.app.client_manager.baremetal = mock.Mock() - self.app.client_manager.compute = mock.Mock() - self.nodes = [ - mock.Mock(uuid='uuid1', provision_state='active', - properties={}, maintenance=False), - mock.Mock(uuid='uuid2', provision_state='enroll', - properties={'capabilities': 'profile:compute'}, - maintenance=False), - mock.Mock(uuid='uuid3', provision_state='available', - properties={'capabilities': 'profile:compute,' - 'compute_profile:1,control_profile:true'}, - maintenance=False), - mock.Mock(uuid='uuid4', provision_state='available', - properties={'capabilities': 'profile:compute,' - 'compute_profile:0'}, maintenance=False), - mock.Mock(uuid='uuid5', provision_state='available', - properties={}, maintenance=False), - mock.Mock(uuid='uuid6', provision_state='available', - properties={}, maintenance=False), - mock.Mock(uuid='uuid7', provision_state='active', - properties={}, maintenance=True), - ] - self.hypervisors = [ - 
mock.Mock(hypervisor_type='ironic', - hypervisor_hostname='uuid%d' % i, - status='enabled', state='up') - for i in range(1, 6) - ] - self.hypervisors[-1].status = 'disabled' - self.bm_client = self.app.client_manager.baremetal - self.bm_client.node.list.return_value = self.nodes - self.compute_client = self.app.client_manager.compute - self.compute_client.hypervisors.list.return_value = self.hypervisors - - def test_list(self): - parsed_args = self.check_parser(self.cmd, [], []) - result = self.cmd.take_action(parsed_args) - self.assertEqual(5, len(result[0])) - self.assertEqual( - [('uuid1', self.nodes[0].name, 'active', None, ''), - ('uuid3', self.nodes[2].name, 'available', 'compute', - 'compute, control'), - ('uuid4', self.nodes[3].name, 'available', 'compute', '')], - result[1]) - - def test_all(self): - parsed_args = self.check_parser(self.cmd, ['--all'], [('all', True)]) - result = self.cmd.take_action(parsed_args) - self.assertEqual(6, len(result[0])) - self.assertEqual( - [('uuid1', self.nodes[0].name, 'active', None, '', ''), - ('uuid2', self.nodes[1].name, 'enroll', 'compute', '', - 'Provision state enroll'), - ('uuid3', self.nodes[2].name, 'available', 'compute', - 'compute, control', ''), - ('uuid4', self.nodes[3].name, 'available', 'compute', '', ''), - ('uuid5', self.nodes[4].name, 'available', None, '', - 'Compute service disabled'), - ('uuid6', self.nodes[5].name, 'available', None, '', - 'No hypervisor record'), - ('uuid7', self.nodes[6].name, 'active', None, '', - 'Maintenance')], - result[1]) diff --git a/tripleoclient/tests/v1/test_overcloud_raid.py b/tripleoclient/tests/v1/test_overcloud_raid.py deleted file mode 100644 index 9251e2cc7..000000000 --- a/tripleoclient/tests/v1/test_overcloud_raid.py +++ /dev/null @@ -1,114 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
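Aside: the fake nodes above encode profiles in Ironic-style capabilities strings such as 'profile:compute,compute_profile:1,control_profile:true'. Roughly, such a string decomposes as shown below; this helper is illustrative only and is not the parser tripleoclient actually uses:

def parse_capabilities(caps):
    # 'profile:compute,compute_profile:1' ->
    # {'profile': 'compute', 'compute_profile': '1'}
    if not caps:
        return {}
    return dict(item.split(':', 1) for item in caps.split(','))


assert parse_capabilities('profile:compute,compute_profile:1') == {
    'profile': 'compute', 'compute_profile': '1'}
assert parse_capabilities('') == {}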
- -import json -import tempfile -from unittest import mock - -from osc_lib.tests import utils as test_utils - -from tripleoclient.tests import fakes as ooofakes -from tripleoclient.tests.v1.baremetal import fakes -from tripleoclient.v1 import overcloud_raid - - -class TestCreateRAID(fakes.TestBaremetal): - - def setUp(self): - super(TestCreateRAID, self).setUp() - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = ooofakes.FakeOptions() - self.cmd = overcloud_raid.CreateRAID(self.app, app_args) - - self.conf = { - "logical_disks": [ - {"foo": "bar"}, - {"foo2": "bar2"} - ] - } - execution = mock.MagicMock( - output=json.dumps({ - "result": None - }) - ) - execution.id = "IDID" - playbook_runner = mock.patch( - 'tripleoclient.utils.run_ansible_playbook', - autospec=True - ) - playbook_runner.start() - self.addCleanup(playbook_runner.stop) - - def test_ok(self): - conf = json.dumps(self.conf) - arglist = ['--node', 'uuid1', '--node', 'uuid2', conf] - verifylist = [ - ('node', ['uuid1', 'uuid2']), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - def test_from_file(self): - with tempfile.NamedTemporaryFile('w+t') as fp: - json.dump(self.conf, fp) - fp.flush() - arglist = ['--node', 'uuid1', '--node', 'uuid2', fp.name] - verifylist = [ - ('node', ['uuid1', 'uuid2']), - ('configuration', fp.name) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - def test_no_nodes(self): - arglist = ['{}'] - verifylist = [ - ('configuration', '{}') - ] - self.assertRaises(test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - def test_not_yaml(self): - arglist = ['--node', 'uuid1', '--node', 'uuid2', ':'] - verifylist = [ - ('node', ['uuid1', 'uuid2']), - ('configuration', ':') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaisesRegex(RuntimeError, 'cannot be parsed as YAML', - self.cmd.take_action, parsed_args) - - def test_bad_type(self): - for conf in ('[]', '{logical_disks: 42}', '{logical_disks: [42]}'): - arglist = ['--node', 'uuid1', '--node', 'uuid2', conf] - verifylist = [ - ('node', ['uuid1', 'uuid2']), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(TypeError, self.cmd.take_action, parsed_args) - - def test_bad_value(self): - conf = '{another_key: [{}]}' - arglist = ['--node', 'uuid1', '--node', 'uuid2', conf] - verifylist = [ - ('node', ['uuid1', 'uuid2']), - ('configuration', conf) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(ValueError, self.cmd.take_action, parsed_args) diff --git a/tripleoclient/tests/v1/test_plugin.py b/tripleoclient/tests/v1/test_plugin.py deleted file mode 100644 index df21650e8..000000000 --- a/tripleoclient/tests/v1/test_plugin.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
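Aside: test_from_file above uses the NamedTemporaryFile-plus-flush idiom for feeding a JSON file to a command by name. The flush is what makes it work: without it the data may still sit in the write buffer when the command reopens the path. A minimal sketch (POSIX semantics; reopening a NamedTemporaryFile by name does not work on Windows):

import json
import tempfile

conf = {"logical_disks": [{"size_gb": 100, "raid_level": "1"}]}
with tempfile.NamedTemporaryFile('w+t') as fp:
    json.dump(conf, fp)
    fp.flush()  # push buffered bytes to disk before a second open()
    with open(fp.name) as reader:
        assert json.load(reader) == conf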
-# - -from unittest import mock - -from tripleoclient.tests import base -from tripleoclient.tests import fakes - -# Load the plugin init module for the plugin list and show commands -plugin_name = 'tripleoclient' -plugin_client = 'tripleoclient.plugin' - - -class FakePluginV1Client(object): - def __init__(self, **kwargs): - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - - -class TestPluginV1(base.TestCommand): - def setUp(self): - super(TestPluginV1, self).setUp() - self.app.client_manager.tripleoclient = FakePluginV1Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - get_key = mock.patch('tripleoclient.utils.get_key') - mock_get_key = get_key.start() - mock_get_key.return_value = 'keyfile-path' - self.addCleanup(get_key.stop) diff --git a/tripleoclient/tests/v1/test_tripleo_config.py b/tripleoclient/tests/v1/test_tripleo_config.py deleted file mode 100644 index 74d951180..000000000 --- a/tripleoclient/tests/v1/test_tripleo_config.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Tests for the tripleoclient.v1.tripleo_config - -Tests basic parser behavior, with both default and user-supplied -values of arguments. -Further assertions are placed on results of the parser.
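Aside: the setUp above uses the patcher start()/stop() idiom. Two details matter: start() returns the mock, so return_value must be configured on that object rather than on the patcher, and addCleanup(patcher.stop) guarantees the patch is undone even when a test fails. Stdlib-only sketch:

import os
import unittest
from unittest import mock


class TestPatcherLifecycle(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch('os.getcwd')
        mock_getcwd = patcher.start()        # start() returns the mock
        mock_getcwd.return_value = '/fake'   # configure the mock itself
        self.addCleanup(patcher.stop)        # undo even on test failure

    def test_patched(self):
        self.assertEqual('/fake', os.getcwd())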
-""" -from unittest import mock - -from tripleoclient.tests import base - -from tripleoclient.constants import UNDERCLOUD_OUTPUT_DIR -from tripleoclient.v1 import tripleo_config - - -class TestGenerateAnsibleConfig(base.TestCommand): - - def setUp(self): - super(TestGenerateAnsibleConfig, self).setUp() - self.config = tripleo_config - self.cmd = tripleo_config.GenerateAnsibleConfig(self.app, None) - - @mock.patch('tripleo_common.utils.ansible.write_default_ansible_cfg') - @mock.patch( - 'tripleoclient.utils.get_deployment_user', - return_value='stack') - @mock.patch('tripleoclient.v1.tripleo_config.logging') - def test_all_defaults(self, mock_log, mock_deploy_user, mock_ansible): - defaults = [ - ('deployment_user', 'stack'), - ('output_dir', UNDERCLOUD_OUTPUT_DIR)] - - parsed_args = self.check_parser(self.cmd, [], defaults) - self.cmd.take_action(parsed_args) - - mock_ansible.assert_called_once_with( - UNDERCLOUD_OUTPUT_DIR, - 'stack', - ssh_private_key=None) - - @mock.patch('tripleo_common.utils.ansible.write_default_ansible_cfg') - @mock.patch( - 'tripleoclient.utils.get_deployment_user', - return_value='notastack') - @mock.patch('tripleoclient.v1.tripleo_config.logging') - def test_all_defaults_not_matching_deploy_user(self, mock_log, - mock_deploy_user, - mock_ansible): - defaults = [ - ('deployment_user', 'stack'), - ('output_dir', UNDERCLOUD_OUTPUT_DIR)] - - parsed_args = self.check_parser(self.cmd, [], defaults) - self.cmd.take_action(parsed_args) - - mock_ansible.assert_called_once_with( - UNDERCLOUD_OUTPUT_DIR, - 'stack', - ssh_private_key=None) - - @mock.patch('tripleo_common.utils.ansible.write_default_ansible_cfg') - @mock.patch( - 'tripleoclient.utils.get_deployment_user', - return_value='foo') - @mock.patch('tripleoclient.v1.tripleo_config.logging') - def test_all_alternate(self, mock_log, mock_deploy_user, mock_ansible): - defaults = [ - ('deployment_user', 'foo'), - ('output_dir', '/fizz/buzz')] - - args = ['--deployment-user', 'foo', '--output-dir', '/fizz/buzz'] - - parsed_args = self.check_parser(self.cmd, args, defaults) - self.cmd.take_action(parsed_args) - - mock_ansible.assert_called_once_with( - '/fizz/buzz', - 'foo', - ssh_private_key=None) diff --git a/tripleoclient/tests/v1/test_tripleo_validator.py b/tripleoclient/tests/v1/test_tripleo_validator.py deleted file mode 100644 index 8f9bbacd6..000000000 --- a/tripleoclient/tests/v1/test_tripleo_validator.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from unittest import mock - -from tripleoclient.tests import base - -from tripleoclient.v1 import tripleo_validator -from tripleoclient import constants -from tripleoclient.tests import fakes - - -class TestValidator(base.TestCase): - - def setUp(self): - self.validator = tripleo_validator - super(TestValidator, self).setUp() - - def test_module_init(self): - expected_names = set([ - 'LOG', - 'TripleOValidatorList', - 'TripleOValidatorShow', - 'TripleOValidatorGroupInfo', - 'TripleOValidatorShowParameter', - 'TripleOValidatorRun', - 'TripleOValidatorCommunityInit', - 'TripleOValidatorShowHistory', - 'TripleOValidatorShowRun' - ]) - - module_names = set(dir(self.validator)) - - self.assertTrue(expected_names.issubset(module_names)) - - -class TestValidatorGroupInfo(base.TestCommand): - - def setUp(self): - super(TestValidatorGroupInfo, self).setUp() - - # Get the command object to test - self.cmd = tripleo_validator.TripleOValidatorGroupInfo(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'group_information', autospec=True, - return_value=fakes.GROUPS_LIST) - def test_show_group_info(self, mock_validations): - arglist = [] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestValidatorList(base.TestCommand): - - def setUp(self): - super(TestValidatorList, self).setUp() - - # Get the command object to test - self.cmd = tripleo_validator.TripleOValidatorList(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'list_validations', - autospec=True, - return_value=fakes.VALIDATIONS_LIST) - def test_validation_list_noargs(self, mock_validations): - arglist = [] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestValidatorShow(base.TestCommand): - - def setUp(self): - super(TestValidatorShow, self).setUp() - - # Get the command object to test - self.cmd = tripleo_validator.TripleOValidatorShow(self.app, None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_validations', - autospec=True, - return_value=fakes.VALIDATIONS_LIST[0]) - def test_validation_show(self, mock_validations): - arglist = ['my_val1'] - verifylist = [('validation_name', 'my_val1')] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestValidatorShowParameter(base.TestCommand): - - def setUp(self): - super(TestValidatorShowParameter, self).setUp() - - # Get the command object to test - self.cmd = tripleo_validator.TripleOValidatorShowParameter(self.app, - None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_validations_parameters', - autospec=True, - return_value=fakes.VALIDATIONS_LIST[1]) - def test_validation_show_parameter(self, mock_validations): - arglist = ['--validation', 'my_val2'] - verifylist = [('validation_name', ['my_val2'])] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestValidatorShowRun(base.TestCommand): - - def setUp(self): - super(TestValidatorShowRun, self).setUp() - - # Get the command object to test - self.cmd = tripleo_validator.TripleOValidatorShowRun(self.app, - None) - - @mock.patch('validations_libs.validation_actions.ValidationLogs.' 
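Aside: these validator tests patch methods with autospec=True. An autospec'd method mock enforces the real signature and captures the instance as the first call argument, which is why assertions elsewhere in this tree pass the object itself to assert_called_once_with. A compact stdlib demonstration:

from unittest import mock


class Actions:
    def list_validations(self, group=None):
        return []


with mock.patch.object(Actions, 'list_validations', autospec=True,
                       return_value=['fake']) as mocked:
    actions = Actions()
    assert actions.list_validations(group='pre') == ['fake']
    # The instance travels through as the first argument, and calls
    # that break the real signature would raise TypeError.
    mocked.assert_called_once_with(actions, group='pre')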
- 'get_logfile_content_by_uuid', - autospec=True, - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - def test_validation_show_run(self, mock_validations): - arglist = ['008886df-d297-1eaa-2a74-000000000008'] - verifylist = [('uuid', '008886df-d297-1eaa-2a74-000000000008')] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestValidatorShowHistory(base.TestCommand): - - def setUp(self): - super(TestValidatorShowHistory, self).setUp() - - # Get the command object to test - self.cmd = tripleo_validator.TripleOValidatorShowHistory(self.app, - None) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_history', - autospec=True, - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - def test_validation_show_history(self, mock_validations): - arglist = [] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'show_history', - autospec=True, - return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST) - def test_validation_show_history_for_a_validation(self, mock_validations): - arglist = [ - '--validation', - '512e' - ] - verifylist = [('validation', '512e')] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - -class TestValidatorRun(base.TestCommand): - - def setUp(self): - super().setUp() - - self.cmd = tripleo_validator.TripleOValidatorRun(self.app, None) - - @mock.patch('validations_libs.cli.run.common.print_dict', autospec=True) - @mock.patch('validations_libs.cli.run.common.write_output', autospec=True) - @mock.patch('validations_libs.validation_actions.ValidationActions.' - 'run_validations', - return_value=fakes.FAKE_SUCCESS_RUN, - autospec=True) - def test_validation_run(self, mock_validations, mock_write_output, - mock_print_dict): - arglist = ['--validation', 'mock_validation'] - verify_list = [ - ('validation_name', ['mock_validation']), - ('validation_log_dir', constants.VALIDATIONS_LOG_BASEDIR)] - - parsed_args = self.check_parser(self.cmd, arglist, verify_list) - self.cmd.take_action(parsed_args) - - # The 'output.log' argument value isn't derived from CLI arguments - # but from the VF configuration file. Changes to it, or to the way - # it is handled, should be reflected here. - - mock_write_output.assert_called_once_with( - 'output.log', fakes.FAKE_SUCCESS_RUN) - - mock_print_dict.assert_called_once_with(fakes.FAKE_SUCCESS_RUN) diff --git a/tripleoclient/tests/v1/tripleo/__init__.py b/tripleoclient/tests/v1/tripleo/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v1/tripleo/test_tripleo_deploy.py b/tripleoclient/tests/v1/tripleo/test_tripleo_deploy.py deleted file mode 100644 index f874ac7b0..000000000 --- a/tripleoclient/tests/v1/tripleo/test_tripleo_deploy.py +++ /dev/null @@ -1,1093 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import os -import sys -import tempfile -import yaml -from unittest import mock - -from heatclient import exc as hc_exc - -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient.tests import fakes -from tripleoclient.tests.v1.test_plugin import TestPluginV1 - -# Load the plugin init module for the plugin list and show commands -from tripleoclient.v1 import tripleo_deploy - -import ansible_runner - - -class FakePluginV1Client(object): - def __init__(self, **kwargs): - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - - -class TestDeployUndercloud(TestPluginV1): - - def setUp(self): - super(TestDeployUndercloud, self).setUp() - - # Get the command object to test - self.cmd = tripleo_deploy.Deploy(self.app, None) - self.cmd.ansible_dir = '/tmp' - - tripleo_deploy.Deploy.heat_pid = mock.MagicMock( - return_value=False) - tripleo_deploy.Deploy.tht_render = '/twd/templates' - tripleo_deploy.Deploy.heat_launch = mock.MagicMock( - side_effect=(lambda *x, **y: None)) - - self.tc = self.app.client_manager.tripleoclient = mock.MagicMock() - self.orc = self.tc.local_orchestration = mock.MagicMock() - self.orc.stacks.create = mock.MagicMock( - return_value={'stack': {'id': 'foo'}}) - - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy._is_undercloud_deploy') - @mock.patch('tripleoclient.utils.check_hostname') - def test_run_preflight_checks(self, mock_check_hostname, mock_uc): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--preflight-validations'], []) - - mock_uc.return_value = False - self.cmd._run_preflight_checks(parsed_args) - mock_check_hostname.assert_called_once_with(False) - - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy._is_undercloud_deploy') - @mock.patch('tripleoclient.utils.check_hostname') - def test_run_preflight_checks_output_only(self, mock_check_hostname, - mock_uc): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--output-only', - '--preflight-validations'], []) - - mock_uc.return_value = False - self.cmd._run_preflight_checks(parsed_args) - mock_check_hostname.assert_not_called() - - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy._is_undercloud_deploy') - @mock.patch('tripleoclient.utils.check_hostname') - def test_run_preflight_checks_disabled(self, mock_check_hostname, - mock_uc): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8'], - []) - - mock_uc.return_value = True - self.cmd._run_preflight_checks(parsed_args) - mock_check_hostname.assert_not_called() - - def test_get_roles_file_path(self): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8'], []) - - roles_file = self.cmd._get_roles_file_path(parsed_args) - self.assertEqual(roles_file, - '/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_standalone.yaml') - - def test_get_roles_file_path_custom_file(self): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--templates', '/tmp/thtroot', - '--roles-file', 'foobar.yaml'], []) - - roles_file = self.cmd._get_roles_file_path(parsed_args) - self.assertEqual(roles_file, 'foobar.yaml') - - def test_get_roles_file_path_custom_templates(self): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--templates', '/tmp/thtroot'], []) - - roles_file = self.cmd._get_roles_file_path(parsed_args)
- self.assertEqual(roles_file, - '/tmp/thtroot/roles_data_standalone.yaml') - - def test_get_networks_file_path(self): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8'], []) - - networks_file = self.cmd._get_networks_file_path(parsed_args) - self.assertEqual('/dev/null', networks_file) - - def test_get_networks_file_path_custom_file(self): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--networks-file', 'foobar.yaml'], []) - - networks_file = self.cmd._get_networks_file_path(parsed_args) - self.assertEqual('foobar.yaml', networks_file) - - def test_get_networks_file_path_custom_templates(self): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--templates', '/tmp/thtroot'], []) - - networks_file = self.cmd._get_networks_file_path(parsed_args) - self.assertEqual('/dev/null', networks_file) - - @mock.patch('os.path.exists') - @mock.patch('tripleoclient.utils.fetch_roles_file') - def test_get_primary_role_name(self, mock_data, mock_exists): - parsed_args = mock.Mock() - mock_data.return_value = [ - {'name': 'Bar'}, {'name': 'Foo', 'tags': ['primary']} - ] - self.assertEqual( - self.cmd._get_primary_role_name(parsed_args.roles_file, - parsed_args.templates), - 'Foo') - - @mock.patch('tripleoclient.utils.fetch_roles_file', return_value=None) - def test_get_primary_role_name_none_defined(self, mock_data): - parsed_args = self.check_parser(self.cmd, [], []) - self.assertEqual( - self.cmd._get_primary_role_name(parsed_args.roles_file, - parsed_args.templates), - 'Standalone') - - @mock.patch('tripleoclient.utils.fetch_roles_file') - def test_get_primary_role_name_no_primary(self, mock_data): - parsed_args = mock.Mock() - mock_data.return_value = [{'name': 'Bar'}, {'name': 'Foo'}] - self.assertEqual( - self.cmd._get_primary_role_name(parsed_args.roles_file, - parsed_args.templates), - 'Bar') - - @mock.patch('os.path.exists', side_effect=[True, False]) - @mock.patch('shutil.copytree') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_create_working_dirs') - def test_populate_templates_dir(self, mock_workingdirs, mock_copy, - mock_exists): - self.cmd.tht_render = '/foo' - self.cmd._populate_templates_dir('/bar') - mock_workingdirs.assert_called_once() - mock_copy.assert_called_once_with('/bar', '/foo', symlinks=True) - - @mock.patch('os.path.exists', return_value=False) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' 
- '_create_working_dirs') - def test_populate_templates_dir_bad_source(self, mock_workingdirs, - mock_exists): - self.cmd.tht_render = '/foo' - self.assertRaises(exceptions.NotFound, - self.cmd._populate_templates_dir, '/foo') - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('os.chmod') - @mock.patch('os.path.exists') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleo_common.utils.passwords.generate_passwords') - @mock.patch('yaml.safe_dump') - def test_update_passwords_env_init(self, mock_dump, mock_pw, mock_cc, - mock_exists, mock_chmod, mock_user): - pw_dict = {"GeneratedPassword": 123} - - mock_pw.return_value = pw_dict - mock_exists.return_value = False - - mock_open_context = mock.mock_open() - with mock.patch('builtins.open', mock_open_context): - self.cmd._update_passwords_env(self.temp_homedir, 'stack') - - mock_open_handle = mock_open_context() - mock_dump.assert_called_once_with({'parameter_defaults': pw_dict}, - mock_open_handle, - default_flow_style=False) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('os.chmod') - @mock.patch('os.path.exists') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleo_common.utils.passwords.generate_passwords') - @mock.patch('yaml.safe_dump') - def test_update_passwords_env(self, mock_dump, mock_pw, mock_cc, - mock_exists, mock_chmod, mock_user): - pw_dict = {"GeneratedPassword": 123, "LegacyPass": "override me"} - t_pw_conf_path = os.path.join( - self.temp_homedir, 'tripleo-standalone-passwords.yaml') - - mock_pw.return_value = pw_dict - - old_pw_file = os.path.join(constants.CLOUD_HOME_DIR, - 'tripleo-standalone-passwords.yaml') - - def mock_file_exists(file_name): - return not file_name == old_pw_file - mock_exists.side_effect = mock_file_exists - - with open(t_pw_conf_path, 'w') as t_pw: - t_pw.write('parameter_defaults: {ExistingKey: xyz, ' - 'LegacyPass: pick-me-legacy-tht, ' - 'RpcPassword: pick-me-rpc}\n') - - self.cmd._update_passwords_env(self.temp_homedir, 'stack', - passwords={'ADefault': 456, - 'ExistingKey': - 'dontupdate'}) - expected_dict = { - 'parameter_defaults': {'GeneratedPassword': 123, - 'LegacyPass': 'pick-me-legacy-tht', - 'RpcPassword': 'pick-me-rpc', - 'ExistingKey': 'xyz', - 'ADefault': 456}} - mock_dump.assert_called_once_with(expected_dict, - mock.ANY, - default_flow_style=False) - - @mock.patch('tripleoclient.utils.fetch_roles_file', - return_value={}, autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'get_template_contents', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.environment_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('heatclient.common.template_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_setup_heat_environments', autospec=True) - @mock.patch('tripleo_common.image.kolla_builder.' 
- 'container_images_prepare_multi') - def test_deploy_tripleo_heat_templates_redir(self, - mock_cipm, - mock_setup_heat_envs, - mock_hc_templ_parse, - mock_hc_env_parse, - mock_hc_get_templ_cont, - mock_hc_process, - mock_role_data): - - with tempfile.NamedTemporaryFile(delete=False) as roles_file: - self.addCleanup(os.unlink, roles_file.name) - - mock_cipm.return_value = {} - - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--templates', '/tmp/thtroot', - '--roles-file', roles_file.name], []) - - mock_setup_heat_envs.return_value = [ - './inside.yaml', '/tmp/thtroot/abs.yaml', - '/tmp/thtroot/puppet/foo.yaml', - '/tmp/thtroot/environments/myenv.yaml', - '/tmp/thtroot42/notouch.yaml', - '../outside.yaml'] - - self.cmd._deploy_tripleo_heat_templates(self.orc, parsed_args) - - mock_hc_process.assert_has_calls([ - mock.call(env_path='./inside.yaml', - include_env_in_files=False), - mock.call(env_path='/twd/templates/abs.yaml', - include_env_in_files=False), - mock.call(env_path='/twd/templates/puppet/foo.yaml', - include_env_in_files=False), - mock.call(env_path='/twd/templates/environments/myenv.yaml', - include_env_in_files=False), - mock.call(env_path='/tmp/thtroot42/notouch.yaml', - include_env_in_files=False), - mock.call(env_path='../outside.yaml', - include_env_in_files=False)]) - - @mock.patch('tripleoclient.utils.fetch_roles_file', - return_value={}, autospec=True) - @mock.patch('tripleoclient.utils.rel_or_abs_path') - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'get_template_contents', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.environment_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('heatclient.common.template_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_setup_heat_environments', autospec=True) - @mock.patch('yaml.safe_dump', autospec=True) - @mock.patch('yaml.safe_load', autospec=True) - @mock.patch('builtins.open') - @mock.patch('tempfile.NamedTemporaryFile', autospec=True) - @mock.patch('tripleo_common.image.kolla_builder.' 
- 'container_images_prepare_multi') - def test_deploy_tripleo_heat_templates_rewrite(self, - mock_cipm, - mock_temp, mock_open, - mock_yaml_load, - mock_yaml_dump, - mock_setup_heat_envs, - mock_hc_templ_parse, - mock_hc_env_parse, - mock_hc_get_templ_cont, - mock_hc_process, - mock_norm_path, - mock_roles_data): - def hc_process(*args, **kwargs): - if 'abs.yaml' in kwargs['env_path']: - raise hc_exc.CommandError - else: - return ({}, {}) - - mock_cipm.return_value = {} - - mock_hc_process.side_effect = hc_process - - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--templates', '/tmp/thtroot'], []) - - rewritten_env = {'resource_registry': { - 'OS::Foo::Bar': '/twd/outside.yaml', - 'OS::Foo::Baz': '/twd/templates/inside.yaml', - 'OS::Foo::Qux': '/twd/templates/abs.yaml', - 'OS::Foo::Quux': '/tmp/thtroot42/notouch.yaml', - 'OS::Foo::Corge': '/twd/templates/puppet/foo.yaml' - } - } - myenv = {'resource_registry': { - 'OS::Foo::Bar': '../outside.yaml', - 'OS::Foo::Baz': './inside.yaml', - 'OS::Foo::Qux': '/tmp/thtroot/abs.yaml', - 'OS::Foo::Quux': '/tmp/thtroot42/notouch.yaml', - 'OS::Foo::Corge': '/tmp/thtroot/puppet/foo.yaml' - } - } - mock_yaml_load.return_value = myenv - - mock_setup_heat_envs.return_value = [ - './inside.yaml', '/tmp/thtroot/abs.yaml', - '/tmp/thtroot/puppet/foo.yaml', - '/tmp/thtroot/environments/myenv.yaml', - '../outside.yaml'] - - self.cmd._deploy_tripleo_heat_templates(self.orc, parsed_args) - - mock_yaml_dump.assert_has_calls([mock.call(rewritten_env, - default_flow_style=False)]) - - @mock.patch('tripleoclient.utils.check_network_plugin') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy._is_undercloud_deploy') - @mock.patch('tripleoclient.utils.fetch_roles_file', - return_value={}, autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'get_template_contents', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.environment_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('heatclient.common.template_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_setup_heat_environments', autospec=True) - @mock.patch('tripleo_common.image.kolla_builder.' - 'container_images_prepare_multi') - def test_deploy_tripleo_heat_templates_nw_plugin_uc(self, - mock_cipm, - mock_setup_heat_envs, - mock_hc_templ_parse, - mock_hc_env_parse, - mock_hc_get_templ_cont, - mock_hc_process, - mock_role_data, - mock_is_uc, - mock_check_nw_plugin): - - with tempfile.NamedTemporaryFile(delete=False) as roles_file: - self.addCleanup(os.unlink, roles_file.name) - - mock_cipm.return_value = {} - mock_is_uc.return_value = True - - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--upgrade', '--output-dir', '/my', - '--roles-file', roles_file.name], []) - - self.cmd._deploy_tripleo_heat_templates(self.orc, parsed_args) - - mock_check_nw_plugin.assert_called_once_with('/my', {}) - - @mock.patch('tripleoclient.utils.check_network_plugin') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy._is_undercloud_deploy') - @mock.patch('tripleoclient.utils.fetch_roles_file', - return_value={}, autospec=True) - @mock.patch('heatclient.common.template_utils.' - 'process_environment_and_files', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.template_utils.' 
- 'get_template_contents', return_value=({}, {}), - autospec=True) - @mock.patch('heatclient.common.environment_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('heatclient.common.template_format.' - 'parse', autospec=True, return_value=dict()) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_setup_heat_environments', autospec=True) - @mock.patch('tripleo_common.image.kolla_builder.' - 'container_images_prepare_multi') - def test_deploy_tripleo_heat_templates_nw_plugin_st(self, - mock_cipm, - mock_setup_heat_envs, - mock_hc_templ_parse, - mock_hc_env_parse, - mock_hc_get_templ_cont, - mock_hc_process, - mock_role_data, - mock_is_uc, - mock_check_nw_plugin): - - with tempfile.NamedTemporaryFile(delete=False) as roles_file: - self.addCleanup(os.unlink, roles_file.name) - - mock_cipm.return_value = {} - mock_is_uc.return_value = False - - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--upgrade', '--output-dir', '/my', - '--roles-file', roles_file.name], []) - - self.cmd._deploy_tripleo_heat_templates(self.orc, parsed_args) - - mock_check_nw_plugin.assert_not_called() - - @mock.patch('shutil.copy') - @mock.patch('os.path.exists', return_value=False) - def test_normalize_user_templates(self, mock_exists, mock_copy): - user_tht_root = '/userroot' - tht_root = '/thtroot' - env_files = [ - '/home/basic.yaml', - '/home/dir/dir.yaml', - 'home/relative.yaml', - 'file.yaml', - '~/tilde.yaml', - '../../../dots.yaml', - '/userroot/template.yaml', - '/userroot/tht/tht.yaml', - ] - expected = [ - '/thtroot/basic.yaml', - '/thtroot/dir.yaml', - '/thtroot/relative.yaml', - '/thtroot/file.yaml', - '/thtroot/tilde.yaml', - '/thtroot/dots.yaml', - '/thtroot/template.yaml', - '/thtroot/tht/tht.yaml' - ] - results = self.cmd._normalize_user_templates(user_tht_root, - tht_root, - env_files) - - self.assertEqual(expected, results) - self.assertEqual(mock_copy.call_count, 6) - - @mock.patch('os.path.exists', return_value=True) - def test_normalize_user_templates_exists(self, mock_exists): - user_tht_root = '/userroot' - tht_root = '/thtroot' - env_files = ['/home/basic.yaml'] - self.assertRaises(exceptions.DeploymentError, - self.cmd._normalize_user_templates, - user_tht_root, - tht_root, - env_files) - - @mock.patch('os.path.exists', return_value=True) - def test_normalize_user_templates_trailing_slash(self, mock_exists): - user_tht_root = '/userroot/' - tht_root = '/thtroot' - env_files = ['/userroot/basic.yaml'] - expected = ['/thtroot/basic.yaml'] - results = self.cmd._normalize_user_templates(user_tht_root, - tht_root, - env_files) - self.assertEqual(expected, results) - - def _setup_heat_environments(self, tmpdir, tht_from, - mock_update_pass_env, mock_run, - extra_cmd=None): - cmd_extra = extra_cmd or [] - - tht_outside = os.path.join(tmpdir, 'tht-outside') - os.mkdir(tht_outside) - tht_to = os.path.join(tmpdir, 'tht-to') - os.mkdir(tht_to) - with open(os.path.join(tht_from, 'env.yaml'), - mode='w') as env_file: - yaml.dump({}, env_file) - with open(os.path.join(tht_from, 'foo.yaml'), - mode='w') as env_file: - yaml.dump({}, env_file) - with open(os.path.join(tht_outside, 'outside.yaml'), - mode='w') as env_file: - yaml.dump({}, env_file) - - tht_render = os.path.join(tht_to, 'tripleo-heat-installer-templates') - mock_update_pass_env.return_value = os.path.join( - tht_render, 'passwords.yaml') - mock_run.return_value = 0 - original_abs = os.path.abspath - - def abs_path_stub(*args, **kwargs): - if 'notenv.yaml' in args: - return 
os.path.join(tht_render, 'notenv.yaml') - if 'env.yaml' in args: - return os.path.join(tht_render, 'env.yaml') - return original_abs(*args, **kwargs) - - # logic handled in _standalone_deploy - self.cmd.output_dir = tht_to - # Note we don't create tht_render as _populate_templates_dir creates it - self.cmd.tht_render = tht_render - self.cmd._populate_templates_dir(tht_from) - - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1/8', - '--templates', tht_from, - '--output-dir', tht_to, - '--hieradata-override', - 'legacy.yaml', - '-e', - os.path.join(tht_from, 'foo.yaml'), - '-e', - os.path.join(tht_outside, - 'outside.yaml'), - ] + cmd_extra, []) - expected_env = [ - os.path.join(tht_render, - 'overcloud-resource-registry-puppet.yaml'), - os.path.join(tht_render, 'passwords.yaml'), - os.path.join(tht_render, - 'tripleoclient-hosts-portmaps.yaml'), - 'hiera_or.yaml', - os.path.join(tht_render, 'foo.yaml'), - os.path.join(tht_render, 'outside.yaml')] - - with mock.patch('os.path.abspath', side_effect=abs_path_stub): - with mock.patch('os.path.isfile'): - environment = self.cmd._setup_heat_environments( - parsed_args.roles_file, parsed_args.networks_file, - parsed_args) - - self.assertEqual(expected_env, environment) - - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_create_working_dirs', autospec=True) - @mock.patch('tripleoclient.v1.tripleo_deploy.TripleoInventory', - autospec=True) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_launch_heat', autospec=True) - @mock.patch('tripleo_common.utils.config.Config', - autospec=True) - @mock.patch('os.path.join', return_value='/twd/inventory.yaml') - @mock.patch('shutil.copyfile') - def test_download_ansible_playbooks(self, mock_shutil, mock_join, - mock_stack_config, - mock_launch_heat, mock_importInv, - createdir_mock): - - fake_output_dir = '/twd' - extra_vars = {'Undercloud': { - 'ansible_connection': 'local', - 'ansible_python_interpreter': sys.executable}} - mock_inventory = mock.Mock() - mock_importInv.return_value = mock_inventory - with mock.patch('sys.stdout', autospec=True) as mock_stdout: - self.cmd.output_dir = fake_output_dir - self.cmd._download_ansible_playbooks(mock_launch_heat, - 'undercloud', - 'Undercloud') - self.assertEqual(mock_stdout.flush.call_count, 2) - mock_inventory.write_static_inventory.assert_called_once_with( - fake_output_dir + '/inventory.yaml', extra_vars) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'container_images_prepare_multi') - def test_prepare_container_images(self, mock_cipm): - env = {'parameter_defaults': {}} - mock_cipm.return_value = {'FooImage': 'foo/bar:baz'} - - self.cmd._prepare_container_images(env, [{'name': 'Compute'}]) - - mock_cipm.assert_called_once_with( - env, - [{'name': 'Compute'}], - dry_run=True, - ) - self.assertEqual( - { - 'parameter_defaults': { - 'FooImage': 'foo/bar:baz' - } - }, - env - ) - - @mock.patch.object( - ansible_runner.runner_config, - 'RunnerConfig', - return_value=fakes.FakeRunnerConfig() - ) - @mock.patch.object( - ansible_runner.Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return() - ) - @mock.patch('os.path.exists') - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_stack_outputs') - @mock.patch('tripleo_common.utils.ansible.' 
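Aside: abs_path_stub above shows the fall-through stub pattern: capture the original function before patching, intercept only the inputs the test cares about, and delegate everything else. In isolation:

import os
from unittest import mock

original_abspath = os.path.abspath


def abspath_stub(path):
    # Redirect only the template names under test; everything else
    # keeps real abspath behaviour.
    if path == 'env.yaml':
        return '/rendered/env.yaml'
    return original_abspath(path)


with mock.patch('os.path.abspath', side_effect=abspath_stub):
    assert os.path.abspath('env.yaml') == '/rendered/env.yaml'
    assert os.path.abspath('/etc') == '/etc'  # real behaviour preserved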
- 'write_default_ansible_cfg') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.chmod') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('subprocess.check_call', autospec=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('os.mkdir') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_populate_templates_dir') - @mock.patch('tripleoclient.utils.archive_deploy_artifacts', - return_value='/tmp/foo.tar.bzip2') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_cleanup_working_dirs') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_create_working_dirs') - @mock.patch('tripleoclient.utils.wait_api_port_ready', - autospec=True) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_deploy_tripleo_heat_templates', autospec=True, - return_value='undercloud, 0') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_ansible_playbooks', autospec=True, - return_value='/foo') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_launch_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_kill_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_configure_puppet') - @mock.patch('os.geteuid', return_value=0) - @mock.patch('os.environ', return_value='CREATE_COMPLETE') - @mock.patch('tripleoclient.utils.wait_for_stack_ready', return_value=True) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_take_action_standalone(self, mock_dump_artifact, mock_poll, - mock_environ, mock_geteuid, mock_puppet, - mock_killheat, mock_launchheat, - mock_download, mock_tht, - mock_wait_for_port, mock_createdirs, - mock_cleanupdirs, mock_tarball, - mock_templates_dir, mock_open, mock_os, - mock_user, mock_cc, mock_chmod, mock_ac, - mock_outputs, mock_copy, mock_cmdline, - mock_chdir, mock_file_exists, mock_run, - mock_run_prepare): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1', - '--templates', '/tmp/thtroot', - '--stack', 'undercloud', - '--output-dir', '/my', - '--standalone-role', 'Undercloud', - # TODO(cjeanner) drop once we have - # proper oslo.privsep - '--deployment-user', 'stack', - '-e', '/tmp/thtroot/puppet/foo.yaml', - '-e', '/tmp/thtroot//docker/bar.yaml', - '-e', '/tmp/thtroot42/notouch.yaml', - '-e', '~/custom.yaml', - '-e', 'something.yaml', - '-e', '../../../outside.yaml'], []) - - mock_file_exists.return_value = True - fake_orchestration = mock_launchheat(parsed_args) - self.cmd.take_action(parsed_args) - mock_createdirs.assert_called_once() - mock_puppet.assert_called_once() - mock_launchheat.assert_called_with(parsed_args, self.cmd.output_dir) - mock_tht.assert_called_once_with(self.cmd, fake_orchestration, - parsed_args) - mock_download.assert_called_with(self.cmd, fake_orchestration, - 'undercloud', 'Undercloud', - sys.executable) - mock_tarball.assert_called_once() - mock_cleanupdirs.assert_called_once() - self.assertEqual(mock_killheat.call_count, 2) - - @mock.patch.object( - ansible_runner.runner_config, - 'RunnerConfig', - return_value=fakes.FakeRunnerConfig() - ) - @mock.patch.object( - ansible_runner.Runner, - 'run', - return_value=fakes.fake_ansible_runner_run_return(1) - ) - @mock.patch('os.path.exists') - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - 
@mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_stack_outputs') - @mock.patch('tripleo_common.utils.ansible.' - 'write_default_ansible_cfg') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.chmod') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('subprocess.check_call', autospec=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('os.mkdir') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_populate_templates_dir') - @mock.patch('tripleoclient.utils.archive_deploy_artifacts', - return_value='/tmp/foo.tar.bzip2') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_cleanup_working_dirs') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_create_working_dirs') - @mock.patch('tripleoclient.utils.wait_api_port_ready', - autospec=True) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_deploy_tripleo_heat_templates', autospec=True, - return_value='undercloud, 0') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_ansible_playbooks', autospec=True, - return_value='/foo') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_launch_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_kill_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_configure_puppet') - @mock.patch('os.geteuid', return_value=0) - @mock.patch('os.environ', return_value='CREATE_COMPLETE') - @mock.patch('tripleoclient.utils.wait_for_stack_ready', return_value=True) - @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, - return_value="/foo/inventory.yaml") - def test_take_action_ansible_err(self, mock_dump_artifact, mock_poll, - mock_environ, mock_geteuid, mock_puppet, - mock_killheat, mock_launchheat, - mock_download, mock_tht, - mock_wait_for_port, mock_createdirs, - mock_cleanupdirs, mock_tarball, - mock_templates_dir, mock_open, mock_os, - mock_user, mock_cc, mock_chmod, mock_ac, - mock_outputs, mock_copy, mock_cmdline, - mock_chdir, mock_file_exists, mock_run, - mock_run_prepare): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1', - '--templates', '/tmp/thtroot', - '--stack', 'undercloud', - '--output-dir', '/my', - '--standalone-role', 'Undercloud', - # TODO(cjeanner) drop once we have - # proper oslo.privsep - '--deployment-user', 'stack', - '-e', '/tmp/thtroot/puppet/foo.yaml', - '-e', '/tmp/thtroot//docker/bar.yaml', - '-e', '/tmp/thtroot42/notouch.yaml', - '-e', '~/custom.yaml', - '-e', 'something.yaml', - '-e', '../../../outside.yaml'], []) - - mock_file_exists.return_value = True - fake_orchestration = mock_launchheat(parsed_args) - self.assertRaises(exceptions.DeploymentError, - self.cmd.take_action, parsed_args) - mock_createdirs.assert_called_once() - mock_puppet.assert_called_once() - mock_launchheat.assert_called_with(parsed_args, self.cmd.output_dir) - mock_tht.assert_called_once_with(self.cmd, fake_orchestration, - parsed_args) - mock_download.assert_called_with(self.cmd, fake_orchestration, - 'undercloud', 'Undercloud', - sys.executable) - mock_tarball.assert_called_once() - mock_cleanupdirs.assert_called_once() - self.assertEqual(mock_killheat.call_count, 2) - - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' 
- '_download_stack_outputs') - @mock.patch('tripleo_common.utils.ansible.' - 'write_default_ansible_cfg') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.chmod') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('subprocess.check_call', autospec=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('os.mkdir') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_populate_templates_dir') - @mock.patch('tripleoclient.utils.archive_deploy_artifacts', - return_value='/tmp/foo.tar.bzip2') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_cleanup_working_dirs') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_create_working_dirs') - @mock.patch('tripleoclient.utils.wait_api_port_ready') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_deploy_tripleo_heat_templates', autospec=True, - return_value='undercloud, 0') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_ansible_playbooks', autospec=True, - return_value='/foo') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_launch_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_kill_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_configure_puppet') - @mock.patch('os.geteuid', return_value=0) - @mock.patch('os.environ', return_value='CREATE_COMPLETE') - @mock.patch('tripleoclient.utils.wait_for_stack_ready', return_value=True) - def test_take_action_other_err(self, mock_poll, - mock_environ, mock_geteuid, mock_puppet, - mock_killheat, mock_launchheat, - mock_download, mock_tht, - mock_wait_for_port, mock_createdirs, - mock_cleanupdirs, mock_tarball, - mock_templates_dir, mock_open, mock_os, - mock_user, mock_cc, mock_chmod, mock_ac, - mock_outputs, mock_copy, mock_cmdline, - mock_chdir): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1', - '--templates', '/tmp/thtroot', - '--stack', 'undercloud', - '--output-dir', '/my', - '--standalone-role', 'Undercloud', - # TODO(cjeanner) drop once we have - # proper oslo.privsep - '--deployment-user', 'stack', - '-e', '/tmp/thtroot/puppet/foo.yaml', - '-e', '/tmp/thtroot//docker/bar.yaml', - '-e', '/tmp/thtroot42/notouch.yaml', - '-e', '~/custom.yaml', - '-e', 'something.yaml', - '-e', '../../../outside.yaml'], []) - - mock_wait_for_port.side_effect = exceptions.DeploymentError - self.assertRaises(exceptions.DeploymentError, - self.cmd.take_action, parsed_args) - mock_createdirs.assert_called_once() - mock_puppet.assert_called_once() - mock_launchheat.assert_called_with(parsed_args, self.cmd.output_dir) - mock_tht.assert_not_called() - mock_download.assert_not_called() - mock_tarball.assert_called_once() - mock_cleanupdirs.assert_called_once() - self.assertEqual(mock_killheat.call_count, 1) - - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - def test_take_action(self, mock_copy, mock_cmdline): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1', - '--templates', '/tmp/thtroot', - '--stack', 'undercloud', - '--output-dir', '/my'], []) - self.assertRaises(exceptions.DeploymentError, - self.cmd.take_action, parsed_args) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy._standalone_deploy') - def 
test_take_action_failure(self, mock_deploy, mock_copy, mock_cmdline): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1', - '--templates', '/tmp/thtroot', - '--stack', 'undercloud', - '--output-dir', '/my'], []) - mock_deploy.side_effect = exceptions.DeploymentError - self.assertRaises(exceptions.DeploymentError, - self.cmd.take_action, parsed_args) - mock_copy.assert_called_once() - - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_set_data_rights') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_stack_outputs') - @mock.patch('tripleo_common.utils.ansible.' - 'write_default_ansible_cfg') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.chmod') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.mkdir') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_populate_templates_dir') - @mock.patch('tripleoclient.utils.archive_deploy_artifacts', - return_value='/tmp/foo.tar.bzip2') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_cleanup_working_dirs') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_create_working_dirs') - @mock.patch('tripleoclient.utils.wait_api_port_ready', - autospec=True) - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_deploy_tripleo_heat_templates', autospec=True, - return_value='undercloud, 0') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_ansible_playbooks', autospec=True, - return_value='/foo') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_launch_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_kill_heat') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_configure_puppet') - @mock.patch('os.geteuid', return_value=0) - @mock.patch('os.environ', return_value='CREATE_COMPLETE') - @mock.patch('tripleoclient.utils.wait_for_stack_ready', return_value=True) - def test_standalone_deploy_rc_output_only( - self, mock_poll, - mock_environ, mock_geteuid, mock_puppet, - mock_killheat, mock_launchheat, - mock_download, mock_tht, - mock_wait_for_port, mock_createdirs, - mock_cleanupdirs, mock_tarball, - mock_templates_dir, mock_open, mock_os, - mock_chmod, mock_ac, - mock_outputs, mock_copy, mock_cmdline, - mock_chdir, mock_file_exists, mock_set_data_rights): - parsed_args = self.check_parser(self.cmd, - ['--local-ip', '127.0.0.1', - '--templates', '/tmp/thtroot', - '--stack', 'undercloud', - '--output-dir', '/my', - '--output-only'], []) - - rc = self.cmd.take_action(parsed_args) - self.assertEqual(None, rc) - - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_set_data_rights') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.reset_cmdline') - @mock.patch('tripleoclient.utils.copy_clouds_yaml') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' - '_download_stack_outputs') - @mock.patch('tripleo_common.utils.ansible.' - 'write_default_ansible_cfg') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.chmod') - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.mkdir') - @mock.patch('builtins.open') - @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.' 
-                '_populate_templates_dir')
-    @mock.patch('tripleoclient.utils.archive_deploy_artifacts',
-                return_value='/tmp/foo.tar.bzip2')
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_cleanup_working_dirs')
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_create_working_dirs')
-    @mock.patch('tripleoclient.utils.wait_api_port_ready',
-                autospec=True)
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_deploy_tripleo_heat_templates', autospec=True,
-                return_value='undercloud, 0')
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_download_ansible_playbooks', autospec=True,
-                return_value='/foo')
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_launch_heat')
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_kill_heat')
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.'
-                '_configure_puppet')
-    @mock.patch('os.geteuid', return_value=0)
-    @mock.patch('os.environ', return_value='CREATE_COMPLETE')
-    @mock.patch('tripleoclient.utils.wait_for_stack_ready', return_value=True)
-    def test_standalone_deploy_transport(
-            self, mock_poll,
-            mock_environ, mock_geteuid, mock_puppet,
-            mock_killheat, mock_launchheat,
-            mock_download, mock_tht,
-            mock_wait_for_port, mock_createdirs,
-            mock_cleanupdirs, mock_tarball,
-            mock_templates_dir, mock_open, mock_os,
-            mock_chmod, mock_ac,
-            mock_outputs, mock_copy, mock_cmdline,
-            mock_chdir, mock_file_exists, mock_set_data_rights):
-
-        # Test default transport "local"
-        parsed_args = self.check_parser(self.cmd,
-                                        ['--local-ip', '127.0.0.1',
-                                         '--templates', '/tmp/thtroot',
-                                         '--stack', 'undercloud',
-                                         '--output-dir', '/my',
-                                         '--output-only'], [])
-
-        self.cmd.take_action(parsed_args)
-        call = mock.call('/foo', mock.ANY, ssh_private_key=None,
-                         transport='local')
-        self.assertEqual(mock_ac.mock_calls, [call])
-
-        # Test transport "ssh"
-        mock_ac.reset_mock()
-        parsed_args = self.check_parser(self.cmd,
-                                        ['--local-ip', '127.0.0.1',
-                                         '--templates', '/tmp/thtroot',
-                                         '--stack', 'undercloud',
-                                         '--output-dir', '/my',
-                                         '--output-only',
-                                         '--transport', 'ssh'], [])
-
-        self.cmd.take_action(parsed_args)
-        call = mock.call('/foo', mock.ANY, ssh_private_key=None,
-                         transport='ssh')
-        self.assertEqual(mock_ac.mock_calls, [call])
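A note on the pattern used throughout the tests above: stacked @mock.patch decorators are applied bottom-up, so the innermost (bottom) patch supplies the first mock argument and the outermost (top) patch supplies the last. That is why mock_poll (for wait_for_stack_ready, the bottom decorator) leads every signature while the top-most patches close it. A minimal, self-contained sketch of that ordering rule, using plain unittest.mock rather than tripleoclient code:

    import os
    import unittest
    from unittest import mock


    class DecoratorOrderExample(unittest.TestCase):

        @mock.patch('os.path.exists', return_value=True)  # outermost: injected last
        @mock.patch('os.getcwd', return_value='/tmp')     # innermost: injected first
        def test_order(self, mock_getcwd, mock_exists):
            # Both patches are active inside the test body.
            self.assertEqual('/tmp', os.getcwd())
            self.assertTrue(os.path.exists('/anywhere'))
            mock_getcwd.assert_called_once_with()
            mock_exists.assert_called_once_with('/anywhere')


    if __name__ == '__main__':
        unittest.main()

Reordering such a decorator stack without reordering the parameter list silently binds the wrong mocks, which is why the long signatures in these tests mirror their stacks exactly.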
diff --git a/tripleoclient/tests/v1/tripleo/test_tripleo_upgrade.py b/tripleoclient/tests/v1/tripleo/test_tripleo_upgrade.py
deleted file mode 100644
index 4ebb14bfb..000000000
--- a/tripleoclient/tests/v1/tripleo/test_tripleo_upgrade.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2018 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from unittest import mock
-
-from osc_lib.tests import utils
-
-# Load the plugin init module for the plugin list and show commands
-from tripleoclient import exceptions
-from tripleoclient.v1 import tripleo_upgrade
-
-
-class TestUpgrade(utils.TestCommand):
-
-    def setUp(self):
-        super(TestUpgrade, self).setUp()
-
-        # Get the command object to test
-        self.cmd = tripleo_upgrade.Upgrade(self.app, None)
-        self.cmd.ansible_dir = '/tmp'
-        self.ansible_playbook_cmd = "ansible-playbook"
-
-    @mock.patch('tripleoclient.utils.prompt_user_for_confirmation',
-                return_value=True)
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.take_action',
-                autospec=True)
-    def test_take_action(self, mock_deploy, mock_confirm):
-        verifylist = [
-            ('local_ip', '127.0.0.1'),
-            ('templates', '/tmp/thtroot'),
-            ('stack', 'undercloud'),
-            ('output_dir', '/my'),
-        ]
-
-        parsed_args = self.check_parser(self.cmd,
-                                        ['--local-ip', '127.0.0.1',
-                                         '--templates', '/tmp/thtroot',
-                                         '--stack', 'undercloud',
-                                         '--output-dir', '/my',
-                                         '-e', '/tmp/thtroot/puppet/foo.yaml',
-                                         '-e', '/tmp/thtroot//docker/bar.yaml',
-                                         '-e', '/tmp/thtroot42/notouch.yaml',
-                                         '-e', '~/custom.yaml',
-                                         '-e', 'something.yaml',
-                                         '-e', '../../../outside.yaml'],
-                                        verifylist)
-
-        self.cmd.take_action(parsed_args)
-        parsed_args.standalone = True
-        parsed_args.upgrade = True
-        mock_deploy.assert_called_with(self.cmd, parsed_args)
-
-    @mock.patch('tripleoclient.utils.prompt_user_for_confirmation',
-                return_value=True)
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy.take_action',
-                autospec=True)
-    def test_take_action_prompt(self, mock_deploy, mock_confirm):
-        parsed_args = self.check_parser(self.cmd,
-                                        ['--local-ip', '127.0.0.1',
-                                         '--templates', '/tmp/thtroot',
-                                         '--stack', 'undercloud',
-                                         '--output-dir', '/my',
-                                         '-e', '/tmp/thtroot/puppet/foo.yaml',
-                                         '-e', '/tmp/thtroot//docker/bar.yaml',
-                                         '-e', '/tmp/thtroot42/notouch.yaml',
-                                         '-e', '~/custom.yaml',
-                                         '-e', 'something.yaml',
-                                         '-e', '../../../outside.yaml'], [])
-        self.cmd.take_action(parsed_args)
-        parsed_args.standalone = True
-        parsed_args.upgrade = True
-        mock_deploy.assert_called_with(self.cmd, parsed_args)
-
-    @mock.patch('tripleoclient.utils.prompt_user_for_confirmation',
-                return_value=False)
-    @mock.patch('tripleoclient.v1.tripleo_deploy.Deploy',
-                autospec=True)
-    def test_take_action_prompt_no(self, mock_deploy, mock_confirm):
-        parsed_args = self.check_parser(self.cmd,
-                                        ['--local-ip', '127.0.0.1',
-                                         '--templates', '/tmp/thtroot',
-                                         '--stack', 'undercloud',
-                                         '--output-dir', '/my',
-                                         '-e', '/tmp/thtroot/puppet/foo.yaml',
-                                         '-e', '/tmp/thtroot//docker/bar.yaml',
-                                         '-e', '/tmp/thtroot42/notouch.yaml',
-                                         '-e', '~/custom.yaml',
-                                         '-e', 'something.yaml',
-                                         '-e', '../../../outside.yaml'], [])
-        parsed_args.standalone = True
-        parsed_args.upgrade = True
-        self.assertRaises(exceptions.UndercloudUpgradeNotConfirmed,
-                          self.cmd.take_action, parsed_args)
-        mock_deploy.assert_not_called()
diff --git a/tripleoclient/tests/v1/undercloud/__init__.py b/tripleoclient/tests/v1/undercloud/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tripleoclient/tests/v1/undercloud/test_backup.py b/tripleoclient/tests/v1/undercloud/test_backup.py
deleted file mode 100644
index a69159458..000000000
--- a/tripleoclient/tests/v1/undercloud/test_backup.py
+++ /dev/null
@@ -1,501 +0,0 @@
-# Copyright 2018 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from osc_lib.tests import utils - -from tripleoclient import constants -from tripleoclient.tests import fakes -from tripleoclient.v1 import undercloud_backup -from unittest.mock import call - - -class TestUndercloudBackup(utils.TestCommand): - - def setUp(self): - super(TestUndercloudBackup, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = undercloud_backup.BackupUndercloud(self.app, app_args) - self.inventory = '/tmp/test_inventory.yaml' - self.file = open(self.inventory, 'w').close() - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_legacy_withargs(self, mock_playbook): - arglist = [ - '--add-path', - '/tmp/foo.yaml', - '--add-path', - '/tmp/bar.yaml' - ] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - tags=None, - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={'sources_path': - '/home/stack/,/tmp/bar.yaml,/tmp/foo.yaml'}) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_withargs_remove(self, mock_playbook): - arglist = [ - '--add-path', - '/tmp/foo.yaml', - '--exclude-path', - '/tmp/bar.yaml', - '--exclude-path', - '/home/stack/', - '--add-path', - '/tmp/bar.yaml' - ] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - tags=None, - skip_tags=None, - verbosity=3, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - extra_vars={'sources_path': - '/tmp/foo.yaml'}) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_withargs_remove_double(self, mock_playbook): - arglist = [ - '--add-path', - '/tmp/foo.yaml', - '--add-path', - '/tmp/bar.yaml', - '--exclude-path', - '/tmp/foo.yaml', - '--exclude-path', - '/tmp/foo.yaml' - ] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - tags=None, - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={'sources_path': - '/home/stack/,/tmp/bar.yaml'}) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_withargs_remove_unex(self, mock_playbook): - arglist = [ - '--add-path', - '/tmp/foo.yaml', - '--exclude-path', - '/tmp/non-existing-path.yaml' - ] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - 
tags=None, - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars={'sources_path': - '/home/stack/,/tmp/foo.yaml'}) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_noargs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_create_recover_image', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_init(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--init' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_init_nfs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--init', - 'nfs' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_setup_nfs(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-nfs' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_db_only(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--db-only' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - 
playbook='cli-undercloud-db-backup.yaml', - inventory=parsed_args.inventory, - tags=None, - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_setup_rear(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-rear' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_setup_rear_extra_vars_inline(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-rear', - '--extra-vars', - '{"tripleo_backup_and_restore_nfs_server": "192.168.24.1"}' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - extra_vars_dict = { - 'tripleo_backup_and_restore_nfs_server': '192.168.24.1' - } - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=extra_vars_dict - ) - - @mock.patch('os.path.isfile') - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_db_only_with_setup_options(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--db-only', - '--setup-nfs', - '--setup-rear' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-undercloud-db-backup.yaml', - inventory=parsed_args.inventory, - tags=None, - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_undercloud_backup_setup_nfs_rear_with_inventory(self, - mock_playbook): - arglist = [ - '--setup-nfs', - '--setup-rear', - '--inventory', - self.inventory - ] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - calls = [call(workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None), - call(workdir=mock.ANY, - playbook='prepare-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None)] - - mock_playbook.assert_has_calls(calls) - - @mock.patch('os.path.isfile') - 
@mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_setup_nfs_with_extra_vars(self, - mock_playbook, - mock_access, - mock_isfile): - arglist = [ - '--setup-nfs', - '--extra-vars', - '/tmp/test_vars.yaml' - ] - verifylist = [] - mock_isfile.return_value = True - mock_access.return_value = True - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars='/tmp/test_vars.yaml' - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_inventory(self, mock_playbook): - arglist = [ - '--inventory', - self.inventory - ] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_create_recover_image', - skip_tags=None, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=3, - extra_vars=None - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_no_inventory(self, mock_playbook): - arglist = [ - '--inventory', - '/tmp/no_inventory.yaml' - ] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaisesRegex( - RuntimeError, - 'The inventory file', - self.cmd.take_action, - parsed_args) - - @mock.patch('os.access') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_undercloud_backup_no_readable_inventory(self, - mock_playbook, - mock_access): - arglist = [ - '--inventory', - self.inventory - ] - verifylist = [] - mock_access.return_value = False - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaisesRegex( - RuntimeError, - 'The inventory file', - self.cmd.take_action, - parsed_args) diff --git a/tripleoclient/tests/v1/undercloud/test_config.py b/tripleoclient/tests/v1/undercloud/test_config.py deleted file mode 100644 index efb53f3a7..000000000 --- a/tripleoclient/tests/v1/undercloud/test_config.py +++ /dev/null @@ -1,1173 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
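Two behaviours pinned down near this point in the diff deserve spelling out. First, the inventory tests just above (test_undercloud_backup_no_inventory and test_undercloud_backup_no_readable_inventory) fix a fail-fast contract: take_action must raise a RuntimeError whose message starts with 'The inventory file' when the inventory is missing or unreadable. A minimal sketch of such a pre-flight check; the helper name is illustrative, not the actual tripleoclient internals:

    import os


    def ensure_inventory_readable(inventory):
        # Abort before any playbook runs if the inventory cannot be read.
        if not os.path.isfile(inventory) or not os.access(inventory, os.R_OK):
            raise RuntimeError(
                'The inventory file %s does not exist or is not readable'
                % inventory)

Second, several network tests in test_config.py below assert AllocationPools derived from a subnet CIDR with the gateway, the inspection_iprange and any dhcp_exclude entries carved out. A hedged sketch of one way to compute those pools (not the undercloud_config implementation):

    import ipaddress


    def allocation_pools(cidr, reserved):
        # Walk every usable host in the subnet, skip reserved addresses, and
        # collapse the survivors into contiguous start/end pools.
        net = ipaddress.ip_network(cidr)
        pools, start, prev = [], None, None
        for host in net.hosts():
            if host in reserved:
                if start is not None:
                    pools.append({'start': str(start), 'end': str(prev)})
                    start = None
                continue
            if start is None:
                start = host
            prev = host
        if start is not None:
            pools.append({'start': str(start), 'end': str(prev)})
        return pools


    # Mirrors test_dhcp_exclude: reserve the gateway (.1), the inspection
    # range (.100-.120), plus the excluded .50 and .80-.89.
    reserved = {ipaddress.ip_address('192.168.10.1'),
                ipaddress.ip_address('192.168.10.50')}
    reserved |= {ipaddress.ip_address('192.168.10.%d' % i)
                 for i in list(range(80, 90)) + list(range(100, 121))}
    print(allocation_pools('192.168.10.0/24', reserved))
    # -> pools .2-.49, .51-.79, .90-.99 and .121-.254, matching the
    #    AllocationPools asserted in that test.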
-# -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives import serialization -from cryptography import x509 -from cryptography.x509.oid import NameOID -from datetime import datetime -from datetime import timedelta -from unittest import mock -import fixtures -import os -import tempfile -import yaml - -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture - -from tripleo_common.image import kolla_builder - -from tripleoclient import exceptions -from tripleoclient.tests import base -from tripleoclient.v1 import undercloud_config - - -class TestProcessDriversAndHardwareTypes(base.TestCase): - def setUp(self): - super(TestProcessDriversAndHardwareTypes, self).setUp() - self.conf = mock.Mock(**{key: getattr(undercloud_config.CONF, key) - for key in ( - 'enabled_hardware_types', - 'enable_node_discovery', - 'discovery_default_driver', - 'ironic_enabled_network_interfaces', - 'ironic_default_network_interface')}) - - def test_defaults(self): - env = {} - undercloud_config._process_drivers_and_hardware_types(self.conf, env) - self.assertEqual({ - 'IronicEnabledNetworkInterfaces': ['flat'], - 'IronicDefaultNetworkInterface': 'flat', - 'IronicEnabledHardwareTypes': ['ipmi', 'redfish', 'ilo', 'idrac'], - 'IronicEnabledBootInterfaces': ['ipxe', 'pxe', 'ilo-pxe'], - 'IronicEnabledBiosInterfaces': ['no-bios', 'ilo', 'redfish'], - 'IronicEnabledDeployInterfaces': ['direct', 'ansible', 'ramdisk'], - 'IronicEnabledInspectInterfaces': ['inspector', 'no-inspect', - 'redfish', 'idrac', 'ilo'], - 'IronicEnabledManagementInterfaces': ['ipmitool', 'redfish', - 'idrac', 'ilo', 'fake', - 'noop'], - 'IronicEnabledPowerInterfaces': ['ipmitool', 'redfish', - 'idrac', 'ilo', 'fake'], - 'IronicEnabledRaidInterfaces': ['no-raid', 'idrac'], - 'IronicEnabledVendorInterfaces': ['no-vendor', 'ipmitool', 'idrac'] - }, env) - - def test_one_hardware_type_with_discovery(self): - env = {} - self.conf.enabled_hardware_types = ['redfish'] - self.conf.enable_node_discovery = True - - undercloud_config._process_drivers_and_hardware_types(self.conf, env) - self.assertEqual({ - 'IronicEnabledNetworkInterfaces': ['flat'], - 'IronicDefaultNetworkInterface': 'flat', - # ipmi added because it's the default discovery driver - 'IronicEnabledHardwareTypes': ['redfish', 'ipmi'], - 'IronicEnabledBootInterfaces': ['ipxe', 'pxe'], - 'IronicEnabledBiosInterfaces': ['no-bios', 'redfish'], - 'IronicEnabledDeployInterfaces': ['direct', 'ansible', 'ramdisk'], - 'IronicEnabledInspectInterfaces': ['inspector', 'no-inspect', - 'redfish'], - 'IronicEnabledManagementInterfaces': ['ipmitool', 'redfish', - 'fake', 'noop'], - 'IronicEnabledPowerInterfaces': ['ipmitool', 'redfish', 'fake'], - 'IronicEnabledRaidInterfaces': ['no-raid'], - 'IronicEnabledVendorInterfaces': ['no-vendor', 'ipmitool'], - 'IronicInspectorDiscoveryDefaultDriver': 'ipmi', - 'IronicInspectorEnableNodeDiscovery': True - }, env) - - def test_all_hardware_types(self): - env = {} - self.conf.enabled_hardware_types = ( - self.conf.enabled_hardware_types + ['staging-ovirt', 'snmp', - 'irmc', 'xclarity', - 'fake-hardware'] - ) - - undercloud_config._process_drivers_and_hardware_types(self.conf, env) - self.assertEqual({ - 'IronicEnabledNetworkInterfaces': ['flat'], - 'IronicDefaultNetworkInterface': 'flat', - 'IronicEnabledHardwareTypes': ['ipmi', 'redfish', 'ilo', 'idrac', - 'staging-ovirt', 'snmp', 'irmc', - 
'xclarity', 'fake-hardware'], - 'IronicEnabledBootInterfaces': ['ipxe', 'pxe', 'ilo-pxe', - 'irmc-pxe', 'fake'], - 'IronicEnabledBiosInterfaces': ['no-bios', 'ilo', 'irmc', - 'redfish'], - 'IronicEnabledDeployInterfaces': ['direct', 'ansible', 'ramdisk', - 'fake'], - 'IronicEnabledInspectInterfaces': ['inspector', 'no-inspect', - 'redfish', 'idrac', 'ilo', - 'irmc'], - 'IronicEnabledManagementInterfaces': ['ipmitool', 'redfish', - 'idrac', 'ilo', 'irmc', - 'staging-ovirt', 'xclarity', - 'fake', 'noop'], - 'IronicEnabledPowerInterfaces': ['ipmitool', 'redfish', 'idrac', - 'ilo', 'irmc', 'staging-ovirt', - 'xclarity', 'fake', 'snmp'], - 'IronicEnabledRaidInterfaces': ['no-raid', 'idrac'], - 'IronicEnabledVendorInterfaces': ['no-vendor', 'ipmitool', 'idrac'] - }, env) - - -class TestBaseNetworkSettings(base.TestCase): - def setUp(self): - super(TestBaseNetworkSettings, self).setUp() - self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - # don't actually load config from ~/undercloud.conf - self.mock_config_load = self.useFixture( - fixtures.MockPatch('tripleoclient.utils.load_config')) - self.conf.config(local_ip='192.168.24.1/24', - undercloud_admin_host='192.168.24.3', - undercloud_public_host='192.168.24.2', - undercloud_nameservers=['10.10.10.10', '10.10.10.11']) - # ctlplane network - config group options - self.grp0 = cfg.OptGroup(name='ctlplane-subnet', - title='ctlplane-subnet') - self.opts = [cfg.StrOpt('cidr'), - cfg.ListOpt('dhcp_start'), - cfg.ListOpt('dhcp_end'), - cfg.ListOpt('dhcp_exclude'), - cfg.StrOpt('inspection_iprange'), - cfg.StrOpt('gateway'), - cfg.BoolOpt('masquerade'), - cfg.ListOpt('host_routes', - item_type=cfg.types.Dict(bounds=True), - bounds=True,), - cfg.ListOpt('dns_nameservers')] - self.conf.register_opts(self.opts, group=self.grp0) - self.grp1 = cfg.OptGroup(name='subnet1', title='subnet1') - self.grp2 = cfg.OptGroup(name='subnet2', title='subnet2') - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', - dhcp_end='192.168.24.24', - dhcp_exclude=[], - inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', - masquerade=False, - host_routes=[], - dns_nameservers=[], - group='ctlplane-subnet') - - -class TestNetworkSettings(TestBaseNetworkSettings): - def test_default(self): - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.5', 'end': '192.168.24.24'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_ipv6_control_plane_stateless_default(self): - env = {} - self.conf.config(local_ip='fd12:3456:789a:1::2/64', - undercloud_admin_host='fd12:3456:789a:1::3', - undercloud_public_host='fd12:3456:789a:1::4') - self.conf.config(cidr='fd12:3456:789a:1::/64', - dhcp_start='fd12:3456:789a:1::10', - dhcp_end='fd12:3456:789a:1::20', - dhcp_exclude=[], - dns_nameservers=['fd12:3456:789a:1::5', - 'fd12:3456:789a:1::6'], - inspection_iprange=('fd12:3456:789a:1::30,' - 
'fd12:3456:789a:1::40'), - gateway='fd12:3456:789a:1::1', - masquerade=False, - host_routes=[], - group='ctlplane-subnet') - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': 'fd12:3456:789a:1::1', - 'host_routes': [], - 'ip_range': 'fd12:3456:789a:1::,static', - 'netmask': 'ffff:ffff:ffff:ffff::', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'fd12:3456:789a:1::/64': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': 'fd12:3456:789a:1::10', - 'end': 'fd12:3456:789a:1::20'}], - 'DnsNameServers': ['fd12:3456:789a:1::5', - 'fd12:3456:789a:1::6'], - 'HostRoutes': [], - 'NetworkCidr': 'fd12:3456:789a:1::/64', - 'NetworkGateway': 'fd12:3456:789a:1::1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_ipv6_control_plane_stateful(self): - env = {} - self.conf.config(local_ip='fd12:3456:789a:1::2/64', - undercloud_admin_host='fd12:3456:789a:1::3', - undercloud_public_host='fd12:3456:789a:1::4', - ipv6_address_mode='dhcpv6-stateful') - self.conf.config(cidr='fd12:3456:789a:1::/64', - dhcp_start='fd12:3456:789a:1::10', - dhcp_end='fd12:3456:789a:1::20', - dhcp_exclude=[], - dns_nameservers=['fd12:3456:789a:1::5', - 'fd12:3456:789a:1::6'], - inspection_iprange=('fd12:3456:789a:1::30,' - 'fd12:3456:789a:1::40'), - gateway='fd12:3456:789a:1::1', - masquerade=False, - host_routes=[], - group='ctlplane-subnet') - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': 'fd12:3456:789a:1::1', - 'host_routes': [], - 'ip_range': 'fd12:3456:789a:1::30,fd12:3456:789a:1::40', - 'netmask': 'ffff:ffff:ffff:ffff::', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'fd12:3456:789a:1::/64': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': 'fd12:3456:789a:1::10', - 'end': 'fd12:3456:789a:1::20'}], - 'DnsNameServers': ['fd12:3456:789a:1::5', - 'fd12:3456:789a:1::6'], - 'HostRoutes': [], - 'NetworkCidr': 'fd12:3456:789a:1::/64', - 'NetworkGateway': 'fd12:3456:789a:1::1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateful', - } - self.assertEqual(expected, env) - - def test_nameserver_toomany_fail(self): - env = {} - self.conf.config(undercloud_nameservers=['1.1.1.1', '1.1.1.2', - '1.1.1.3', '1.1.1.4', - '1.1.1.5', '1.1.1.6']) - self.assertRaises(exceptions.InvalidConfiguration, - undercloud_config._process_network_args, - env) - - def test_undercloud_ips_duplicated_fail(self): - env = {} - - # local_ip == undercloud_admin_host - self.conf.config(local_ip='192.168.24.1/24', - undercloud_admin_host='192.168.24.1', - undercloud_public_host='192.168.24.2', - generate_service_certificate=True) - self.assertRaises(exceptions.InvalidConfiguration, - undercloud_config._process_network_args, - env) - - # local_ip == undercloud_public_host - self.conf.config(local_ip='192.168.24.1/24', - undercloud_admin_host='192.168.24.3', - undercloud_public_host='192.168.24.1', - generate_service_certificate=True) - undercloud_config._process_network_args(env) - - # undercloud_admin_host == undercloud_public_host - self.conf.config(local_ip='192.168.24.1/24', - undercloud_admin_host='192.168.24.2', - undercloud_public_host='192.168.24.2', - generate_service_certificate=True) - 
undercloud_config._process_network_args(env) - - # We do not care about ip duplication when ssl is disabled - self.conf.config(local_ip='192.168.24.1/24', - undercloud_admin_host='192.168.24.1', - undercloud_public_host='192.168.24.2', - generate_service_certificate=False, - undercloud_service_certificate=None) - undercloud_config._process_network_args(env) - - def test_start_end_all_addresses(self): - self.conf.config(dhcp_start='192.168.24.0', - dhcp_end='192.168.24.255', - group='ctlplane-subnet') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.4', 'end': '192.168.24.99'}, - {'start': '192.168.24.121', 'end': '192.168.24.254'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_ignore_dhcp_start_end_if_default_but_cidr_not_default(self): - self.conf.config(cidr='192.168.10.0/24', - inspection_iprange='192.168.10.100,192.168.10.120', - gateway='192.168.10.1', - group='ctlplane-subnet') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.10.1', - 'host_routes': [], - 'ip_range': '192.168.10.100,192.168.10.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.10.2', 'end': '192.168.10.99'}, - {'start': '192.168.10.121', 'end': '192.168.10.254'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_dhcp_exclude(self): - self.conf.config(cidr='192.168.10.0/24', - inspection_iprange='192.168.10.100,192.168.10.120', - gateway='192.168.10.1', - dhcp_exclude=['192.168.10.50', - '192.168.10.80-192.168.10.89'], - group='ctlplane-subnet') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.10.1', - 'host_routes': [], - 'ip_range': '192.168.10.100,192.168.10.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.10.2', 'end': '192.168.10.49'}, - {'start': '192.168.10.51', 'end': '192.168.10.79'}, - {'start': '192.168.10.90', 'end': '192.168.10.99'}, - {'start': '192.168.10.121', 'end': '192.168.10.254'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - 
self.assertEqual(expected, env) - - def test_no_dhcp_start_no_dhcp_end(self): - self.conf.config(dhcp_start=[], - dhcp_end=[], - group='ctlplane-subnet') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.4', 'end': '192.168.24.99'}, - {'start': '192.168.24.121', 'end': '192.168.24.254'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}}, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_dhcp_start_no_dhcp_end(self): - self.conf.config(dhcp_start='192.168.24.10', - dhcp_end=[], - group='ctlplane-subnet') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.10', 'end': '192.168.24.99'}, - {'start': '192.168.24.121', 'end': '192.168.24.254'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_dhcp_end_no_dhcp_start(self): - self.conf.config(dhcp_start=[], - dhcp_end='192.168.24.220', - group='ctlplane-subnet') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.4', 'end': '192.168.24.99'}, - {'start': '192.168.24.121', 'end': '192.168.24.220'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_routed_network(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.grp2) - self.conf.config(masquerade=True, - dns_nameservers=['10.1.1.100', '10.1.1.101'], - group='ctlplane-subnet') - self.conf.config(cidr='192.168.10.0/24', - dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - dhcp_exclude=[], - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', - dns_nameservers=['10.2.2.100', '10.2.2.101'], - host_routes=[], - masquerade=True, - group='subnet1') - 
self.conf.config(cidr='192.168.20.0/24', - dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - dhcp_exclude=[], - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', - dns_nameservers=['10.3.3.100', '10.3.3.101'], - host_routes=[], - masquerade=True, - group='subnet2') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [ - {'destination': '192.168.10.0/24', 'nexthop': '192.168.24.1'}, - {'destination': '192.168.20.0/24', 'nexthop': '192.168.24.1'}], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}, - {'gateway': '192.168.10.254', - 'host_routes': [], - 'ip_range': '192.168.10.100,192.168.10.189', - 'netmask': '255.255.255.0', - 'tag': 'subnet1', - 'mtu': 1500}, - {'gateway': '192.168.20.254', - 'host_routes': [], - 'ip_range': '192.168.20.100,192.168.20.189', - 'netmask': '255.255.255.0', - 'tag': 'subnet2', - 'mtu': 1500} - ], - 'MasqueradeNetworks': { - '192.168.10.0/24': ['192.168.24.0/24', - '192.168.10.0/24', - '192.168.20.0/24'], - '192.168.20.0/24': ['192.168.24.0/24', - '192.168.10.0/24', - '192.168.20.0/24'], - '192.168.24.0/24': ['192.168.24.0/24', - '192.168.10.0/24', - '192.168.20.0/24']}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'subnet1', - '192.168.20.0/24': 'subnet2', - '192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - # The ctlplane-subnet subnet have defaults - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.5', 'end': '192.168.24.24'}], - 'DnsNameServers': ['10.1.1.100', '10.1.1.101'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - 'subnet1': { - 'AllocationPools': [ - {'start': '192.168.10.10', 'end': '192.168.10.99'}], - 'DnsNameServers': ['10.2.2.100', '10.2.2.101'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.254'}, - 'subnet2': { - 'AllocationPools': [ - {'start': '192.168.20.10', 'end': '192.168.20.99'}], - 'DnsNameServers': ['10.3.3.100', '10.3.3.101'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.20.0/24', - 'NetworkGateway': '192.168.20.254'} - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_routed_network_no_masquerading(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.grp2) - self.conf.config(cidr='192.168.10.0/24', - dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - dhcp_exclude=[], - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', - dns_nameservers=[], - host_routes=[], - group='subnet1') - self.conf.config(cidr='192.168.20.0/24', - dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - dhcp_exclude=[], - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', - dns_nameservers=[], - host_routes=[], - group='subnet2') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [ - {'destination': '192.168.10.0/24', 'nexthop': '192.168.24.1'}, - {'destination': '192.168.20.0/24', 'nexthop': '192.168.24.1'}], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 
'mtu': 1500}, - {'gateway': '192.168.10.254', - 'host_routes': [], - 'ip_range': '192.168.10.100,192.168.10.189', - 'netmask': '255.255.255.0', - 'tag': 'subnet1', - 'mtu': 1500}, - {'gateway': '192.168.20.254', - 'host_routes': [], - 'ip_range': '192.168.20.100,192.168.20.189', - 'netmask': '255.255.255.0', - 'tag': 'subnet2', - 'mtu': 1500} - ], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'subnet1', - '192.168.20.0/24': 'subnet2', - '192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - # The ctlplane-subnet subnet have defaults - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.5', 'end': '192.168.24.24'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - 'subnet1': { - 'AllocationPools': [ - {'start': '192.168.10.10', 'end': '192.168.10.99'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.254'}, - 'subnet2': { - 'AllocationPools': [ - {'start': '192.168.20.10', 'end': '192.168.20.99'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.20.0/24', - 'NetworkGateway': '192.168.20.254'} - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_no_allocation_pool_on_remote_network(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.config(cidr='192.168.10.0/24', - dhcp_exclude=[], - inspection_iprange='192.168.10.200,192.168.10.254', - gateway='192.168.10.254', - dns_nameservers=[], - host_routes=[], - masquerade=False, - group='subnet1') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [ - {'destination': '192.168.10.0/24', 'nexthop': '192.168.24.1'}], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}, - {'gateway': '192.168.10.254', - 'host_routes': [], - 'ip_range': '192.168.10.200,192.168.10.254', - 'netmask': '255.255.255.0', - 'tag': 'subnet1', - 'mtu': 1500}, - ], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'subnet1', - '192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - # The ctlplane-subnet subnet have defaults - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.5', 'end': '192.168.24.24'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - 'subnet1': { - 'AllocationPools': [ - {'start': '192.168.10.1', 'end': '192.168.10.199'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.254'} - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_no_allocation_pool_on_remote_network_three_pools(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.config(cidr='192.168.10.0/24', - dhcp_exclude=[], - inspection_iprange='192.168.10.100,192.168.10.199', - gateway='192.168.10.222', - dns_nameservers=[], - host_routes=[], - masquerade=False, - group='subnet1') - env = {} - 
undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [ - {'destination': '192.168.10.0/24', 'nexthop': '192.168.24.1'}], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}, - {'gateway': '192.168.10.222', - 'host_routes': [], - 'ip_range': '192.168.10.100,192.168.10.199', - 'netmask': '255.255.255.0', - 'tag': 'subnet1', - 'mtu': 1500}, - ], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'subnet1', - '192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - # The ctlplane-subnet subnet have defaults - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.5', 'end': '192.168.24.24'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - 'subnet1': { - 'AllocationPools': [ - {'start': '192.168.10.1', 'end': '192.168.10.99'}, - {'start': '192.168.10.200', 'end': '192.168.10.221'}, - {'start': '192.168.10.223', 'end': '192.168.10.254'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.222'} - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless', - } - self.assertEqual(expected, env) - - def test_additional_host_routes(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.config(host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - group='ctlplane-subnet') - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.grp2) - self.conf.config(cidr='192.168.10.0/24', - dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - dhcp_exclude=[], - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', - dns_nameservers=[], - host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.10.254'}], - group='subnet1') - self.conf.config(cidr='192.168.20.0/24', - dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - dhcp_exclude=[], - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', - dns_nameservers=[], - host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.20.254'}], - group='subnet2') - env = {} - undercloud_config._process_network_args(env) - expected = { - 'ControlPlaneStaticRoutes': [ - {'destination': '192.168.10.0/24', 'nexthop': '192.168.24.1'}, - {'destination': '192.168.20.0/24', 'nexthop': '192.168.24.1'}, - {'destination': '10.10.10.254/32', 'nexthop': '192.168.24.1'}], - 'IronicInspectorSubnets': [ - {'gateway': '192.168.24.1', - 'host_routes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - 'ip_range': '192.168.24.100,192.168.24.120', - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet', - 'mtu': 1500}, - {'gateway': '192.168.10.254', - 'host_routes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.10.254'}], - 'ip_range': '192.168.10.100,192.168.10.189', - 'netmask': '255.255.255.0', - 'tag': 'subnet1', - 'mtu': 1500}, - {'gateway': '192.168.20.254', - 'host_routes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.20.254'}], - 'ip_range': '192.168.20.100,192.168.20.189', - 'netmask': '255.255.255.0', - 'tag': 'subnet2', - 'mtu': 1500} - ], - 'MasqueradeNetworks': {}, - 'PortPhysnetCidrMap': {'192.168.10.0/24': 'subnet1', - 
'192.168.20.0/24': 'subnet2', - '192.168.24.0/24': 'ctlplane'}, - 'UndercloudCtlplaneSubnets': { - # The ctlplane-subnet subnet have defaults - 'ctlplane-subnet': { - 'AllocationPools': [ - {'start': '192.168.24.5', 'end': '192.168.24.24'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - 'NetworkCidr': '192.168.24.0/24', - 'NetworkGateway': '192.168.24.1'}, - 'subnet1': { - 'AllocationPools': [ - {'start': '192.168.10.10', 'end': '192.168.10.99'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.10.254'}], - 'NetworkCidr': '192.168.10.0/24', - 'NetworkGateway': '192.168.10.254'}, - 'subnet2': { - 'AllocationPools': [ - {'start': '192.168.20.10', 'end': '192.168.20.99'}], - 'DnsNameServers': ['10.10.10.10', '10.10.10.11'], - 'HostRoutes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.20.254'}], - 'NetworkCidr': '192.168.20.0/24', - 'NetworkGateway': '192.168.20.254'} - }, - 'UndercloudCtlplaneIPv6AddressMode': 'dhcpv6-stateless' - } - self.assertEqual(expected, env) - - def test_generate_inspection_subnets(self): - result = undercloud_config._generate_inspection_subnets() - expected = [{'gateway': '192.168.24.1', - 'host_routes': [], - 'ip_range': '192.168.24.100,192.168.24.120', - 'mtu': 1500, - 'netmask': '255.255.255.0', - 'tag': 'ctlplane-subnet'}] - self.assertEqual(expected, result) - - def test_generate_inspection_subnets_invalid(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1']) - self.conf.config(host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - group='ctlplane-subnet') - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.config(group='subnet1') - self.assertRaises(exceptions.DeploymentError, - undercloud_config._generate_inspection_subnets) - - def test__env_set_undercloud_ctlplane_networks_attribues(self): - self.conf.config(local_subnet='ctlplane-subnet', - local_mtu=1444, - undercloud_nameservers=['192.168.24.253', - '192.168.24.252']) - self.conf.config(cidr='192.168.24.0/24', - gateway='192.168.24.254', - host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - group='ctlplane-subnet') - env = {} - undercloud_config._env_set_undercloud_ctlplane_networks_attribues(env) - expected = { - 'CtlplaneNetworkAttributes': { - 'network': {'mtu': 1444}, - 'subnets': { - 'ctlplane-subnet': { - 'cidr': '192.168.24.0/24', - 'dns_nameservers': ['192.168.24.253', - '192.168.24.252'], - 'gateway_ip': '192.168.24.254', - 'host_routes': [{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - 'tags': []}}}} - self.assertEqual(expected, env) - - def test__env_set_undercloud_ctlplane_networks_attribues_routed(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.config(host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - group='ctlplane-subnet') - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.grp2) - self.conf.config(cidr='192.168.10.0/24', - dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - dhcp_exclude=[], - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', - dns_nameservers=[], - host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.10.254'}], - group='subnet1') - self.conf.config(cidr='192.168.20.0/24', - dhcp_start='192.168.20.10', - 
dhcp_end='192.168.20.99', - dhcp_exclude=[], - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', - dns_nameservers=[], - host_routes=[{'destination': '10.10.10.254/32', - 'nexthop': '192.168.20.254'}], - group='subnet2') - env = {} - undercloud_config._env_set_undercloud_ctlplane_networks_attribues(env) - expected = { - 'CtlplaneNetworkAttributes': { - 'network': {'mtu': 1500}, - 'subnets': { - 'ctlplane-subnet': { - 'cidr': '192.168.24.0/24', - 'dns_nameservers': ['10.10.10.10', '10.10.10.11'], - 'gateway_ip': '192.168.24.1', - 'host_routes': [{'destination': '192.168.10.0/24', - 'nexthop': '192.168.24.1'}, - {'destination': '192.168.20.0/24', - 'nexthop': '192.168.24.1'}, - {'destination': '10.10.10.254/32', - 'nexthop': '192.168.24.1'}], - 'tags': []}}}} - self.assertEqual(expected, env) - - -class TestChronySettings(TestBaseNetworkSettings): - def test_default(self): - env = {} - undercloud_config._process_chrony_acls(env) - expected = { - 'ChronyAclRules': ['allow 192.168.24.0/24'], - } - self.assertEqual(expected, env) - - -class TestTLSSettings(base.TestCase): - def test_public_host_with_ip_should_give_ip_endpoint_environment(self): - expected_env_file = os.path.join( - undercloud_config.THT_HOME, - "environments/ssl/tls-endpoints-public-ip.yaml") - - resulting_env_file1 = undercloud_config._get_tls_endpoint_environment( - '127.0.0.1', undercloud_config.THT_HOME) - - self.assertEqual(expected_env_file, resulting_env_file1) - - resulting_env_file2 = undercloud_config._get_tls_endpoint_environment( - '192.168.1.1', undercloud_config.THT_HOME) - - self.assertEqual(expected_env_file, resulting_env_file2) - - def test_public_host_with_fqdn_should_give_dns_endpoint_environment(self): - expected_env_file = os.path.join( - undercloud_config.THT_HOME, - "environments/ssl/tls-endpoints-public-dns.yaml") - - resulting_env_file1 = undercloud_config._get_tls_endpoint_environment( - 'controller-1', undercloud_config.THT_HOME) - - self.assertEqual(expected_env_file, resulting_env_file1) - - resulting_env_file2 = undercloud_config._get_tls_endpoint_environment( - 'controller-1.tripleodomain.com', undercloud_config.THT_HOME) - - self.assertEqual(expected_env_file, resulting_env_file2) - - def get_certificate_and_private_key(self): - private_key = rsa.generate_private_key(public_exponent=3, - key_size=1024, - backend=default_backend()) - issuer = x509.Name([ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"FI"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Helsinki"), - x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Some Company"), - x509.NameAttribute(NameOID.COMMON_NAME, u"Test Certificate"), - ]) - cert_builder = x509.CertificateBuilder( - issuer_name=issuer, subject_name=issuer, - public_key=private_key.public_key(), - serial_number=x509.random_serial_number(), - not_valid_before=datetime.utcnow(), - not_valid_after=datetime.utcnow() + timedelta(days=10) - ) - cert = cert_builder.sign(private_key, - hashes.SHA256(), - default_backend()) - cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM) - key_pem = private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption()) - return cert_pem, key_pem - - def test_get_dict_with_cert_and_key_from_bundled_pem(self): - cert_pem, key_pem = self.get_certificate_and_private_key() - - with tempfile.NamedTemporaryFile() as tempbundle: - tempbundle.write(cert_pem) - tempbundle.write(key_pem) - tempbundle.seek(0) - 
- tls_parameters = undercloud_config._get_public_tls_parameters( - tempbundle.name) - - self.assertEqual(cert_pem, tls_parameters['SSLCertificate']) - self.assertEqual(key_pem, tls_parameters['SSLKey']) - - def test_get_tls_parameters_fails_cause_of_missing_cert(self): - _, key_pem = self.get_certificate_and_private_key() - - with tempfile.NamedTemporaryFile() as tempbundle: - tempbundle.write(key_pem) - tempbundle.seek(0) - - self.assertRaises(ValueError, - undercloud_config._get_public_tls_parameters, - tempbundle.name) - - def test_get_tls_parameters_fails_cause_of_missing_key(self): - cert_pem, _ = self.get_certificate_and_private_key() - - with tempfile.NamedTemporaryFile() as tempbundle: - tempbundle.write(cert_pem) - tempbundle.seek(0) - - self.assertRaises(ValueError, - undercloud_config._get_public_tls_parameters, - tempbundle.name) - - def test_get_tls_parameters_fails_cause_of_unexistent_file(self): - self.assertRaises(IOError, - undercloud_config._get_public_tls_parameters, - '/tmp/unexistent-file-12345.pem') - - -class TestContainerImageConfig(base.TestCase): - def setUp(self): - super(TestContainerImageConfig, self).setUp() - conf_keys = ( - 'container_images_file', - ) - self.conf = mock.Mock(**{key: getattr(undercloud_config.CONF, key) - for key in conf_keys}) - - @mock.patch('shutil.copy') - def test_defaults(self, mock_copy): - env = {} - deploy_args = [] - cip_default = getattr(kolla_builder, - 'CONTAINER_IMAGE_PREPARE_PARAM', None) - self.addCleanup(setattr, kolla_builder, - 'CONTAINER_IMAGE_PREPARE_PARAM', cip_default) - - setattr(kolla_builder, 'CONTAINER_IMAGE_PREPARE_PARAM', [{ - 'set': { - 'namespace': 'one', - 'name_prefix': 'two', - 'name_suffix': 'three', - 'tag': 'four', - }, - 'tag_from_label': 'five', - }]) - - undercloud_config._container_images_config(self.conf, deploy_args, - env, None) - self.assertEqual([], deploy_args) - cip = env['ContainerImagePrepare'][0] - set = cip['set'] - - self.assertEqual( - 'one', set['namespace']) - self.assertEqual( - 'two', set['name_prefix']) - self.assertEqual( - 'three', set['name_suffix']) - self.assertEqual( - 'four', set['tag']) - self.assertEqual( - 'five', cip['tag_from_label']) - - @mock.patch('shutil.copy') - def test_container_images_file(self, mock_copy): - env = {} - deploy_args = [] - self.conf.container_images_file = '/tmp/container_images_file.yaml' - undercloud_config._container_images_config(self.conf, deploy_args, - env, None) - self.assertEqual(['-e', '/tmp/container_images_file.yaml'], - deploy_args) - self.assertEqual({}, env) - - @mock.patch('shutil.copy') - def test_custom(self, mock_copy): - env = {} - deploy_args = [] - with tempfile.NamedTemporaryFile(mode='w') as f: - yaml.dump({ - 'parameter_defaults': {'ContainerImagePrepare': [{ - 'set': { - 'namespace': 'one', - 'name_prefix': 'two', - 'name_suffix': 'three', - 'tag': 'four', - }, - 'tag_from_label': 'five', - }]} - }, f) - self.conf.container_images_file = f.name - cif_name = f.name - - undercloud_config._container_images_config( - self.conf, deploy_args, env, None) - self.assertEqual(['-e', cif_name], deploy_args) diff --git a/tripleoclient/tests/v1/undercloud/test_install_upgrade.py b/tripleoclient/tests/v1/undercloud/test_install_upgrade.py deleted file mode 100644 index 87022bef7..000000000 --- a/tripleoclient/tests/v1/undercloud/test_install_upgrade.py +++ /dev/null @@ -1,926 +0,0 @@ -# Copyright 2015 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import fixtures -import json -import os -from unittest import mock - -from jinja2 import Template - -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture - -from tripleoclient.tests.v1.test_plugin import TestPluginV1 - -# Load the plugin init module for the plugin list and show commands -from tripleoclient.v1 import undercloud - - -class FakePluginV1Client(object): - def __init__(self, **kwargs): - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - - -class TestUndercloudInstall(TestPluginV1): - - def setUp(self): - super(TestUndercloudInstall, self).setUp() - - self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - self.conf.config(container_images_file='/home/stack/foo.yaml') - self.conf.set_default('output_dir', '/home/stack') - # setting this so we don't have to mock get_local_timezone everywhere - self.conf.set_default('undercloud_timezone', 'UTC') - # don't actually load config from ~/undercloud.conf - self.mock_config_load = self.useFixture( - fixtures.MockPatch('tripleoclient.utils.load_config')) - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = undercloud.InstallUndercloud(self.app, app_args) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('builtins.open') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - def test_undercloud_install_default(self, mock_subprocess, - mock_wr, - mock_os, mock_copy, - mock_open, mock_user, mock_getuid): - arglist = ['--no-validations'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', 
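# The setUp above is the standard oslo.config test recipe: wrap the CONF
# object in the oslo fixture so every override made through it is rolled
# back after the test. Standalone, with a throwaway option on a private
# ConfigOpts instance:
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture

conf_obj = cfg.ConfigOpts()
conf_obj.register_opt(cfg.StrOpt('output_dir', default='/tmp'))

fx = oslo_fixture.Config(conf_obj)
fx.setUp()                       # normally self.useFixture(fx) does this
fx.config(output_dir='/home/stack')
assert conf_obj.output_dir == '/home/stack'
fx.cleanUp()                     # override automatically reverted
assert conf_obj.output_dir == '/tmp'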
'192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('builtins.open') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - def test_undercloud_install_with_reproduce_command(self, mock_subprocess, - mock_wr, - mock_os, mock_copy, - mock_open, mock_user, - mock_getuid): - arglist = ['--no-validations', '--reproduce-command'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', '--reproduce-command', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.makedirs', return_value=None) - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - def test_undercloud_install_with_heat_customized(self, mock_subprocess, - mock_wr, mock_os, - mock_copy, mock_user, - 
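# Both assertions above follow the one pattern every test in this class
# uses: patch the side effects (subprocess, filesystem), invoke
# take_action(), then pin the exact argv that reaches subprocess.check_call.
# The pattern reduced to a self-contained toy -- deploy() stands in for the
# command under test:
import subprocess
from unittest import mock


def deploy():
    subprocess.check_call(['openstack', 'tripleo', 'deploy',
                           '--stack', 'undercloud'])


with mock.patch('subprocess.check_call', autospec=True) as check_call:
    deploy()
    check_call.assert_called_with(
        ['openstack', 'tripleo', 'deploy', '--stack', 'undercloud'])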
mock_getuid): - self.conf.config(output_dir='/foo') - self.conf.config(templates='/usertht') - self.conf.config(heat_native='false') - self.conf.config(roles_file='foo/roles.yaml') - arglist = ['--no-validations', '--force-stack-update'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_os.assert_has_calls( - [ - mock.call('/foo/tripleo-config-generated-env-files'), - mock.call('/foo') - ]) - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usertht', - '--roles-file=foo/roles.yaml', - '--networks-file=/usertht/network_data_undercloud.yaml', - '--heat-native=False', '-e', - '/usertht/environments/undercloud.yaml', '-e', - '/usertht/environments/use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usertht/environments/services/ironic.yaml', '-e', - '/usertht/environments/services/ironic-inspector.yaml', '-e', - '/usertht/environments/public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usertht/environments/ssl/tls-endpoints-public-ip.yaml', '-e', - '/usertht/environments/services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/foo', '--cleanup', '-e', - '/foo/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usertht/undercloud-stack-vstate-dropin.yaml', - '--force-stack-update']) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('tripleoclient.v1.undercloud_config.' - '_generate_masquerade_networks', autospec=True) - @mock.patch('tripleoclient.v1.undercloud_config.' - '_generate_subnets_static_routes', autospec=True) - @mock.patch('tripleoclient.v1.undercloud_config.' - '_get_jinja_env_source', autospec=True) - @mock.patch('tripleoclient.v1.undercloud_config.' 
- '_get_unknown_instack_tags', return_value=None, autospec=True) - @mock.patch('jinja2.meta.find_undeclared_variables', return_value={}, - autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - def test_undercloud_install_with_heat_net_conf_over(self, mock_subprocess, - mock_j2_meta, - mock_get_unknown_tags, - mock_get_j2, - mock_sroutes, - mock_masq, - mock_wr, mock_os, - mock_copy, mock_user, - mock_getuid): - self.conf.config(net_config_override='/foo/net-config.json') - self.conf.config(local_interface='ethX') - self.conf.config(undercloud_public_host='4.3.2.1') - self.conf.config(local_mtu='1234') - self.conf.config(undercloud_nameservers=['8.8.8.8', '8.8.4.4']) - self.conf.config(subnets='foo') - self.conf.config(local_subnet='foo') - mock_masq.return_value = {'1.1.1.1/11': ['2.2.2.2/22']} - mock_sroutes.return_value = {'ip_netmask': '1.1.1.1/11', - 'next_hop': '1.1.1.1'} - instack_net_conf = """ - "network_config": [ - { - "type": "ovs_bridge", - "name": "br-ctlplane", - "ovs_extra": [ - "br-set-external-id br-ctlplane bridge-id br-ctlplane" - ], - "members": [ - { - "type": "interface", - "name": "{{LOCAL_INTERFACE}}", - "primary": "true", - "mtu": {{LOCAL_MTU}}, - "dns_servers": {{UNDERCLOUD_NAMESERVERS}} - } - ], - "addresses": [ - { - "ip_netmask": "{{PUBLIC_INTERFACE_IP}}" - } - ], - "routes": {{SUBNETS_STATIC_ROUTES}}, - "mtu": {{LOCAL_MTU}} - } - ] - """ - expected_net_conf = json.loads( - """ - {"network_config": [ - { - "type": "ovs_bridge", - "name": "br-ctlplane", - "ovs_extra": [ - "br-set-external-id br-ctlplane bridge-id br-ctlplane" - ], - "members": [ - { - "type": "interface", - "name": "ethX", - "primary": "true", - "mtu": 1234, - "dns_servers": ["8.8.8.8", "8.8.4.4"] - } - ], - "addresses": [ - { - "ip_netmask": "4.3.2.1" - } - ], - "routes": {"next_hop": "1.1.1.1", "ip_netmask": "1.1.1.1/11"}, - "mtu": 1234 - } - ]} - """ - ) - env = mock.Mock() - env.get_template = mock.Mock(return_value=Template(instack_net_conf)) - mock_get_j2.return_value = (env, None) - arglist = ['--no-validations'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - os_orig = os.path.exists - with mock.patch('os.path.exists') as mock_exists: - - def fcheck(*args, **kwargs): - if '/foo/net-config.json' in args: - return True - return os_orig(*args, **kwargs) - - mock_exists.side_effect = fcheck - self.cmd.take_action(parsed_args) - - # unpack the write env file call to verify if the produced net config - # override JSON matches our expectations - found_net_conf_override = False - for call in mock_wr.call_args_list: - args, kwargs = call - for a in args: - if 'UndercloudNetConfigOverride' in a: - found_net_conf_override = True - self.assertTrue( - a['UndercloudNetConfigOverride'] == expected_net_conf) - self.assertTrue(found_net_conf_override) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - 
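# The override test above hinges on one transformation: the instack-style
# net-config JSON is a Jinja2 template whose placeholders (LOCAL_INTERFACE,
# LOCAL_MTU, UNDERCLOUD_NAMESERVERS, ...) are filled from undercloud.conf
# values, then the rendered text is parsed back into a dict. Note that
# list-valued settings must be JSON-encoded before substitution or the
# rendered text is not valid JSON. A small demonstration of that step:
import json

from jinja2 import Template

template = Template(
    '{"name": "{{LOCAL_INTERFACE}}", "mtu": {{LOCAL_MTU}}, '
    '"dns_servers": {{UNDERCLOUD_NAMESERVERS}}}')
rendered = template.render(
    LOCAL_INTERFACE='ethX', LOCAL_MTU=1234,
    UNDERCLOUD_NAMESERVERS=json.dumps(['8.8.8.8', '8.8.4.4']))
assert json.loads(rendered)['dns_servers'] == ['8.8.8.8', '8.8.4.4']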
'/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'environments/services/masquerade-networks.yaml', - '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '4.3.2.1', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', - '--cleanup', '-e', - '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('builtins.open') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - def test_undercloud_install_with_heat_and_debug(self, mock_subprocess, - mock_wr, - mock_os, mock_copy, - mock_open, mock_user, - mock_getuid): - self.conf.config(undercloud_log_file='/foo/bar') - arglist = ['--no-validations'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - old_verbose = self.cmd.app_args.verbose_level - self.cmd.app_args.verbose_level = 2 - self.cmd.take_action(parsed_args) - self.cmd.app_args.verbose_level = old_verbose - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--debug', 
'--log-file=/foo/bar', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('builtins.open') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - def test_undercloud_install_with_heat_true(self, mock_subprocess, - mock_wr, - mock_os, mock_copy, - mock_open, mock_user, - mock_getuid): - self.conf.config(undercloud_log_file='/foo/bar') - arglist = ['--no-validations'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', '--log-file=/foo/bar', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - -class TestUndercloudUpgrade(TestPluginV1): - def setUp(self): - super(TestUndercloudUpgrade, self).setUp() - - self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - self.conf.config(container_images_file='/home/stack/foo.yaml') - self.conf.set_default('output_dir', '/home/stack') - # setting this so we don't have to mock get_local_timezone everywhere - self.conf.set_default('undercloud_timezone', 'UTC') - # don't actually load config from ~/undercloud.conf - self.mock_config_load = self.useFixture( - fixtures.MockPatch('tripleoclient.utils.load_config')) - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.cmd = undercloud.UpgradeUndercloud(self.app, app_args) - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - 
@mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_default(self, mock_run_command, - mock_subprocess, mock_wr, - mock_os_mkdir, mock_copy, mock_user, - mock_getuid, mock_confirm): - arglist = ['--no-validations'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - mock_run_command.assert_called_with( - ['sudo', 'dnf', 'upgrade', '-y', - 'python3-tripleoclient', - 'openstack-tripleo-common', - 'openstack-tripleo-heat-templates', - 'openstack-tripleo-validations', - 'tripleo-ansible'], - name='Update extra packages' - ) - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--upgrade', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'lifecycle/undercloud-upgrade-prepare.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_all_opts(self, mock_run_command, - mock_subprocess, - mock_wr, - mock_os, mock_copy, mock_user, - mock_getuid): - arglist = ['--force-stack-update', '--no-validations', - '--inflight-validations', '--dry-run', '--yes', - '--disable-container-prepare', '--reproduce-command', - '--skip-package-updates'] - verifylist = [] - self.cmd.app_args.verbose_level = 2 - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - 
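# Set against test_undercloud_install_default, the assertions above isolate
# what the upgrade path adds: a dnf update of the tripleo packages first
# (skipped with --skip-package-updates), then the same deploy argv with
# `--upgrade` and the lifecycle/undercloud-upgrade-prepare.yaml environment
# spliced in ahead of --heat-native. A rough sketch of that argv delta
# (function and argument names are illustrative only):
def upgrade_deploy_args(install_args, tht_root, yes=False):
    extra = ['-y'] if yes else []
    extra += ['--upgrade', '-e',
              '%s/environments/lifecycle/'
              'undercloud-upgrade-prepare.yaml' % tht_root]
    idx = install_args.index('--heat-native')
    return install_args[:idx] + extra + install_args[idx:]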
self.cmd.take_action(parsed_args) - mock_run_command.assert_not_called() - mock_subprocess.assert_not_called() - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_no_pkgs(self, mock_run_command, - mock_subprocess, - mock_wr, - mock_os, mock_copy, mock_user, - mock_getuid, mock_confirm): - arglist = ['--no-validations', '--skip-package-updates'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--upgrade', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'lifecycle/undercloud-upgrade-prepare.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_with_heat_enabled(self, mock_run_command, - mock_subprocess, - mock_wr, mock_os, - mock_copy, mock_user, - mock_getuid, mock_confirm): - arglist = ['--no-validations', '--skip-package-updates'] - verifylist = [] - parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--upgrade', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'lifecycle/undercloud-upgrade-prepare.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_with_heat_true(self, mock_run_command, - mock_subprocess, - mock_wr, mock_os, - mock_copy, mock_user, - mock_getuid, mock_confirm): - arglist = ['--no-validations', '--skip-package-updates'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--upgrade', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'lifecycle/undercloud-upgrade-prepare.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - 
'/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_with_heat_reproduce_and_yes(self, - mock_run_command, - mock_subprocess, - mock_wr, mock_os, - mock_copy, - mock_user, - mock_getuid): - arglist = ['--no-validations', '-y', '--skip-package-updates', - '--reproduce-command'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - self.cmd.take_action(parsed_args) - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '-y', '--upgrade', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'lifecycle/undercloud-upgrade-prepare.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - # TODO(cjeanner) drop once we have proper oslo.privsep - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', '--reproduce-command', - 
'--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) - - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation', - return_value=True) - # TODO(cjeanner) drop once we have proper oslo.privsep - @mock.patch('os.geteuid', return_value=1001) - @mock.patch('getpass.getuser', return_value='stack') - @mock.patch('shutil.copy') - @mock.patch('os.mkdir') - @mock.patch('tripleoclient.utils.write_env_file', autospec=True) - @mock.patch('subprocess.check_call', autospec=True) - @mock.patch('tripleoclient.utils.run_command', autospec=True) - def test_undercloud_upgrade_with_heat_and_debug(self, mock_run_command, - mock_subprocess, - mock_wr, mock_os, - mock_copy, mock_user, - mock_getuid, mock_confirm): - arglist = ['--no-validations', '--skip-package-updates'] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # DisplayCommandBase.take_action() returns two tuples - old_verbose = self.cmd.app_args.verbose_level - self.cmd.app_args.verbose_level = 2 - self.cmd.take_action(parsed_args) - self.cmd.app_args.verbose_level = old_verbose - - mock_subprocess.assert_called_with( - ['sudo', '--preserve-env', 'openstack', 'tripleo', 'deploy', - '--standalone-role', 'Undercloud', '--stack', - 'undercloud', '--local-domain=localdomain', - '--local-ip=192.168.24.1/24', - '--templates=/usr/share/openstack-tripleo-heat-templates/', - '--roles-file=/usr/share/openstack-tripleo-heat-templates/' - 'roles_data_undercloud.yaml', - '--networks-file=/usr/share/openstack-tripleo-heat-templates/' - 'network_data_undercloud.yaml', - '--upgrade', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'lifecycle/undercloud-upgrade-prepare.yaml', - '--heat-native', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'undercloud.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'use-dns-for-vips.yaml', '-e', - '/home/stack/foo.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/ironic-inspector.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'public-tls-undercloud.yaml', - '--public-virtual-ip', '192.168.24.2', - '--control-virtual-ip', '192.168.24.3', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'ssl/tls-endpoints-public-ip.yaml', '-e', - '/usr/share/openstack-tripleo-heat-templates/environments/' - 'services/undercloud-haproxy.yaml', - '--deployment-user', 'stack', - '--output-dir=/home/stack', '--cleanup', - '-e', '/home/stack/tripleo-config-generated-env-files/' - 'undercloud_parameters.yaml', - '--debug', '--log-file=install-undercloud.log', '-e', - '/usr/share/openstack-tripleo-heat-templates/' - 'undercloud-stack-vstate-dropin.yaml']) diff --git a/tripleoclient/tests/v2/__init__.py b/tripleoclient/tests/v2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/container_image/__init__.py b/tripleoclient/tests/v2/container_image/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/container_image/test_tripleo_container_image.py b/tripleoclient/tests/v2/container_image/test_tripleo_container_image.py deleted file mode 100644 index 0bfccd071..000000000 --- a/tripleoclient/tests/v2/container_image/test_tripleo_container_image.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright 2016 
Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from tripleoclient.tests import fakes -from tripleoclient.tests.v1.overcloud_deploy import fakes as deploy_fakes -from tripleoclient.v2 import tripleo_container_image as tcib - - -IMAGE_YAML = """--- -container_images: - - image_source: "tripleo" - imagename: "test/keystone:tag" -""" - -MOCK_WALK = [ - ("", ["base"], [],), - ("/base", ["memcached", "openstack"], ["config.yaml", "test.doc"],), - ("/base/memcached", [], ["memcached.yaml"],), - ("/base/openstack", ["glance", "keystone", "neutron", "nova"], [],), - ( - "/base/openstack/glance", - [], - ["glance-registry.yaml", "glance-api.yaml"], - ), - ("/base/openstack/keystone", [], ["keystone.yaml"],), - ("/base/openstack/neutron", ["api"], [],), - ("/base/openstack/neutron/api", [], ["neutron-api.yml"],), - ("/base/openstack/nova", [], [],), -] - - -class TestContainerImages(deploy_fakes.TestDeployOvercloud): - def setUp(self): - super(TestContainerImages, self).setUp() - self.app = fakes.FakeApp() - self.os_walk = mock.patch( - "os.walk", autospec=True, return_value=iter(MOCK_WALK) - ) - self.os_walk.start() - self.addCleanup(self.os_walk.stop) - self.os_listdir = mock.patch( - "os.listdir", autospec=True, return_value=["config.yaml"] - ) - self.os_listdir.start() - self.addCleanup(self.os_listdir.stop) - self.run_ansible_playbook = mock.patch( - "tripleoclient.utils.run_ansible_playbook", autospec=True - ) - self.run_ansible_playbook.start() - self.addCleanup(self.run_ansible_playbook.stop) - self.buildah_build_all = mock.patch( - "tripleo_common.image.builder.buildah.BuildahBuilder.build_all", - autospec=True, - ) - self.mock_buildah = self.buildah_build_all.start() - self.addCleanup(self.buildah_build_all.stop) - self.cmd = tcib.Build(self.app, None) - - def _take_action(self, parsed_args): - self.cmd.image_parents = {"keystone": "base"} - mock_open = mock.mock_open(read_data=IMAGE_YAML) - with mock.patch("os.path.isfile", autospec=True) as mock_isfile: - mock_isfile.return_value = True - with mock.patch("os.path.isdir", autospec=True) as mock_isdir: - mock_isdir.return_value = True - with mock.patch('builtins.open', mock_open): - with mock.patch( - "tripleoclient.v2.tripleo_container_image.Build" - ".find_image", - autospec=True, - ) as mock_find_image: - mock_find_image.return_value = {"tcib_option": "data"} - self.cmd.take_action(parsed_args) - - def test_find_image(self): - mock_open = mock.mock_open(read_data='---\ntcib_option: "data"') - with mock.patch('builtins.open', mock_open): - image = self.cmd.find_image("keystone", "some/path", "base-image") - self.assertEqual(image, {"tcib_option": "data"}) - - def test_build_tree(self): - image = self.cmd.build_tree("some/path") - self.assertEqual( - image, - [ - { - "base": [ - "memcached", - { - "openstack": [ - "glance", - "keystone", - {"neutron": ["api"]}, - "nova", - ] - }, - ] - } - ], - ) - - def test_image_regex(self): - image = 
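# test_build_tree above fixes the shape build_tree must return for the
# mocked directory walk: a directory with subdirectories becomes
# {name: [children]}, a leaf directory becomes a bare name. A recursive
# sketch producing the same shape (an assumption -- the real method drives
# this from os.walk output instead):
import os


def build_tree(path):
    entries = []
    for name in sorted(os.listdir(path)):
        sub = os.path.join(path, name)
        if os.path.isdir(sub):
            children = build_tree(sub)
            entries.append({name: children} if children else name)
    return entries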
self.cmd.imagename_to_regex("test/centos-binary-keystone:tag") - self.assertEqual(image, "keystone") - image = self.cmd.imagename_to_regex("test/rhel-binary-keystone:tag") - self.assertEqual(image, "keystone") - image = self.cmd.imagename_to_regex("test/rhel-source-keystone:tag") - self.assertEqual(image, "keystone") - image = self.cmd.imagename_to_regex("test/rhel-rdo-keystone:tag") - self.assertEqual(image, "keystone") - image = self.cmd.imagename_to_regex("test/rhel-rhos-keystone:tag") - self.assertEqual(image, "keystone") - image = self.cmd.imagename_to_regex("test/other-keystone:tag") - self.assertEqual(image, "other-keystone") - - def test_rectify_excludes(self): - self.cmd.identified_images = ["keystone", "nova", "glance"] - excludes = self.cmd.rectify_excludes(images_to_prepare=["glance"]) - self.assertEqual(excludes, ["keystone", "nova"]) - - def test_image_build_yaml(self): - arglist = ["--config-file", "config.yaml"] - verifylist = [("config_file", "config.yaml")] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - - assert self.mock_buildah.called - - def test_image_build_with_skip_build(self): - arglist = ["--config-file", "config.yaml", "--skip-build"] - verifylist = [("config_file", "config.yaml"), ("skip_build", True)] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - - assert not self.mock_buildah.called - - def test_image_build_with_push(self): - arglist = ["--config-file", "config.yaml", "--push"] - verifylist = [("config_file", "config.yaml"), ("push", True)] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - - assert self.mock_buildah.called - - def test_image_build_with_volume(self): - arglist = ["--config-file", "config.yaml", "--volume", "bind/mount"] - verifylist = [ - ("config_file", "config.yaml"), - ( - "volumes", - [ - "/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z", - "bind/mount", - ], - ), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - - # NOTE(dvd): For some reason, in py36, args[0] is a string instead - # of being a fullblown BuildahBuilder instance. I wasn't able to find - # the instance anywhere, everything is mocked. 
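# The regex tests above define imagename_to_regex through examples: the
# registry prefix and tag are dropped, a distro/build-type prefix such as
# centos-binary- or rhel-rdo- is stripped, and anything else (other-) is
# kept. One expression satisfying all six cases (the shipped pattern may
# differ):
import re


def imagename_to_regex(imagename):
    name = imagename.rpartition('/')[2].partition(':')[0]
    return re.sub(r'^(centos|rhel)-(binary|source|rdo|rhos)-', '', name)


assert imagename_to_regex('test/centos-binary-keystone:tag') == 'keystone'
assert imagename_to_regex('test/other-keystone:tag') == 'other-keystone'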
- builder_obj = self.mock_buildah.call_args.args[0] - if not isinstance(builder_obj, str): - self.assertIn( - '/etc/yum.repos.d:/etc/distro.repos.d:z', - builder_obj.volumes - ) - - assert self.mock_buildah.called - - def test_image_build_with_repo_dir(self): - arglist = ["--repo-dir", "/somewhere"] - verifylist = [ - ("repo_dir", "/somewhere"), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - - builder_obj = self.mock_buildah.call_args.args[0] - if not isinstance(builder_obj, str): - self.assertIn( - '/somewhere:/etc/distro.repos.d:z', - builder_obj.volumes - ) - - assert self.mock_buildah.called - - def test_image_build_with_exclude(self): - arglist = ["--exclude", "image1"] - verifylist = [ - ("excludes", ["image1"]), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - - assert self.mock_buildah.called - - def test_image_build_failure_no_config_file(self): - arglist = ["--config-file", "not-a-file-config.yaml"] - verifylist = [ - ("config_file", "not-a-file-config.yaml"), - ] - - self.check_parser(self.cmd, arglist, verifylist) - - def test_image_build_config_dir(self): - arglist = ["--config-file", "config.yaml", "--config-path", "/foo"] - verifylist = [("config_file", "config.yaml"), ("config_path", "/foo")] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self._take_action(parsed_args=parsed_args) - self.assertEqual(self.cmd.tcib_config_path, '/foo/tcib') - - def test_image_build_failure_no_config_dir(self): - arglist = ["--config-path", "not-a-path"] - verifylist = [ - ("config_path", "not-a-path"), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - with mock.patch("os.path.isfile", autospec=True) as mock_isfile: - mock_isfile.return_value = True - self.assertRaises(IOError, self.cmd.take_action, parsed_args) - - def test_process_images(self): - rtn_value = {'yay': 'values'} - arglist = ["--config-path", "foobar/"] - verifylist = [ - ("config_path", "foobar/"), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - expected_images = ['foo', 'foobar'] - image_configs = {} - self.cmd.tcib_config_path = '/foo/tcib' - with mock.patch("tripleoclient.v2.tripleo_container_image.Build" - ".find_image", autospec=True) as mock_find_image: - - mock_find_image.return_value = rtn_value - cfgs = self.cmd.process_images(expected_images, parsed_args, - image_configs) - mock_find_image.assert_called_once_with( - self.cmd, 'foo', '/foo/tcib', 'centos:stream9') - self.assertEqual(cfgs, {'foo': rtn_value}) - - -class TestContainerImagesHotfix(deploy_fakes.TestDeployOvercloud): - def setUp(self): - super(TestContainerImagesHotfix, self).setUp() - self.run_ansible_playbook = mock.patch( - "tripleoclient.utils.run_ansible_playbook", autospec=True - ) - self.run_ansible_playbook.start() - self.addCleanup(self.run_ansible_playbook.stop) - self.cmd = tcib.HotFix(self.app, None) - - def _take_action(self, parsed_args): - with mock.patch("os.path.isfile", autospec=True) as mock_isfile: - mock_isfile.return_value = True - self.cmd.take_action(parsed_args) - - def test_image_hotfix(self): - arglist = ["--image", "container1", "--rpms-path", "/opt"] - verifylist = [ - ("images", ["container1"]), - ("rpms_path", "/opt"), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self._take_action(parsed_args=parsed_args) - - def test_image_hotfix_multi_image(self): - arglist = [ - "--image", - 
"container1", - "--image", - "container2", - "--rpms-path", - "/opt", - ] - verifylist = [ - ("images", ["container1", "container2"]), - ("rpms_path", "/opt"), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self._take_action(parsed_args=parsed_args) - - def test_image_hotfix_missing_args(self): - arglist = [] - verifylist = [] - - self.assertRaises( - deploy_fakes.fakes.utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist, - ) diff --git a/tripleoclient/tests/v2/overcloud_ceph/__init__.py b/tripleoclient/tests/v2/overcloud_ceph/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py b/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py deleted file mode 100644 index 8a81d7193..000000000 --- a/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from osc_lib import exceptions as osc_lib_exc - -from tripleoclient.tests import fakes -from tripleoclient.v2 import overcloud_ceph - - -class TestOvercloudCephDeploy(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudCephDeploy, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_ceph.OvercloudCephDeploy(self.app, - app_args) - - @mock.patch('tripleoclient.utils.get_ceph_networks', autospec=True) - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_deploy_ceph(self, mock_playbook, mock_abspath, - mock_path_exists, mock_tempdirs, - mock_get_ceph_networks): - arglist = ['deployed-metal.yaml', '--yes', - '--stack', 'overcloud', - '--skip-user-create', - '--skip-hosts-config', - '--mon-ip', '127.0.0.1', - '--cephadm-ssh-user', 'jimmy', - '--output', 'deployed-ceph.yaml', - '--container-namespace', 'quay.io/ceph', - '--container-image', 'ceph', - '--container-tag', 'latest'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - playbook='cli-deployed-ceph.yaml', - inventory=mock.ANY, - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - skip_tags='cephadm_ssh_user', - reproduce_command=False, - extra_vars_file=mock.ANY, - extra_vars={ - "baremetal_deployed_path": mock.ANY, - "deployed_ceph_tht_path": mock.ANY, - "working_dir": mock.ANY, - "stack_name": 'overcloud', - "tripleo_ceph_client_vars": mock.ANY, - "tripleo_cephadm_standalone": False, - 'tripleo_cephadm_ssh_user': 'jimmy', - 'tripleo_cephadm_cluster': 'ceph', - 'tripleo_cephadm_ingress': True, - 'tripleo_cephadm_first_mon_ip': '127.0.0.1', - 'tripleo_roles_path': mock.ANY, - 
'tripleo_cephadm_container_ns': 'quay.io/ceph', - 'tripleo_cephadm_container_image': 'ceph', - 'tripleo_cephadm_container_tag': 'latest', - } - ) - - @mock.patch('tripleoclient.utils.get_ceph_networks', autospec=True) - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_deploy_ceph_spec(self, mock_playbook, mock_abspath, - mock_path_exists, mock_tempdirs, - mock_get_ceph_networks): - arglist = ['--yes', - '--stack', 'overcloud', - '--skip-user-create', - '--skip-hosts-config', - '--mon-ip', '127.0.0.1', - '--ceph-spec', 'ceph_spec.yaml', - '--cephadm-ssh-user', 'jimmy', - '--output', 'deployed-ceph.yaml', - '--container-namespace', 'quay.io/ceph', - '--container-image', 'ceph', - '--container-tag', 'latest'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - playbook='cli-deployed-ceph.yaml', - inventory=mock.ANY, - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - skip_tags='cephadm_ssh_user', - reproduce_command=False, - extra_vars_file=mock.ANY, - extra_vars={ - "deployed_ceph_tht_path": mock.ANY, - "working_dir": mock.ANY, - "stack_name": 'overcloud', - "tripleo_ceph_client_vars": mock.ANY, - "tripleo_cephadm_standalone": False, - 'tripleo_roles_path': mock.ANY, - 'tripleo_cephadm_first_mon_ip': '127.0.0.1', - 'tripleo_cephadm_cluster': 'ceph', - 'dynamic_ceph_spec': False, - 'tripleo_cephadm_ingress': True, - 'ceph_spec_path': mock.ANY, - 'tripleo_cephadm_container_ns': 'quay.io/ceph', - 'tripleo_cephadm_container_image': 'ceph', - 'tripleo_cephadm_container_tag': 'latest', - 'tripleo_cephadm_ssh_user': 'jimmy', - } - ) - - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - def test_overcloud_deploy_ceph_no_overwrite(self, mock_abspath, - mock_path_exists): - arglist = ['deployed-metal.yaml', - '--stack', 'overcloud', - '--output', 'deployed-ceph.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - def test_overcloud_deploy_ceph_no_metal(self, mock_abspath, - mock_path_exists): - arglist = ['--stack', 'overcloud', - '--output', 'deployed-ceph.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - @mock.patch('tripleoclient.utils.get_ceph_networks', autospec=True) - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_deploy_ceph_ansible_no_force(self, - mock_playbook, - mock_abspath, - mock_path_exists, - mock_tempdirs, - mock_get_ceph_networks): - arglist = ['deployed-metal.yaml', '--yes', - '--ansible-extra-vars', 'foo.yml', - '--output', 'deployed-ceph.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - -class TestOvercloudCephUserDisable(fakes.FakePlaybookExecution): - def setUp(self): - super(TestOvercloudCephUserDisable, self).setUp() - - # Get the command object to test - app_args = 
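# The ceph playbook assertions above wildcard every unstable value -- temp
# workdirs, generated inventories, rendered paths -- with mock.ANY, pinning
# only the variables the command is actually responsible for. The mechanism
# in isolation:
from unittest import mock

run = mock.Mock()
run(playbook='cli-deployed-ceph.yaml', workdir='/tmp/tmp1234',
    extra_vars={'stack_name': 'overcloud'})
run.assert_called_once_with(playbook='cli-deployed-ceph.yaml',
                            workdir=mock.ANY,
                            extra_vars={'stack_name': 'overcloud'})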
mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_ceph.OvercloudCephUserDisable(self.app, - app_args) - - @mock.patch('tripleoclient.utils.parse_ansible_inventory', - autospec=True, return_value=['ceph0', 'ceph1', 'compute0']) - @mock.patch('tripleoclient.utils.get_host_groups_from_ceph_spec', - autospec=True, return_value={'_admin': ['ceph0'], - 'non_admin': ['ceph1']}) - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_ceph_user_disable(self, mock_playbook, mock_abspath, - mock_path_exists, mock_tempdirs, - mock_get_host_groups_from_ceph_spec, - mock_parse_ansible_inventory): - arglist = ['ceph_spec.yaml', '--yes', - '--cephadm-ssh-user', 'ceph-admin', - '--stack', 'overcloud', - '--fsid', '7bdfa1a6-d606-562c-bbf7-05f17c35763e'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_any_call( - playbook='disable_cephadm.yml', - inventory=mock.ANY, - limit_hosts=mock.ANY, - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - reproduce_command=False, - extra_vars={ - "tripleo_cephadm_fsid": '7bdfa1a6-d606-562c-bbf7-05f17c35763e', - "tripleo_cephadm_action": 'disable' - } - ) - mock_playbook.assert_any_call( - playbook='ceph-admin-user-disable.yml', - inventory=mock.ANY, - limit_hosts='ceph0,ceph1', - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - reproduce_command=False, - extra_vars={ - 'tripleo_cephadm_ssh_user': 'ceph-admin', - } - ) - - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - def test_ceph_user_disable_no_yes(self, mock_abspath, - mock_path_exists): - arglist = ['ceph_spec.yaml', - '--cephadm-ssh-user', 'ceph-admin', - '--stack', 'overcloud', - '--fsid', '7bdfa1a6-d606-562c-bbf7-05f17c35763e'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - def test_ceph_user_disable_invalid_fsid(self, mock_abspath, - mock_path_exists): - arglist = ['ceph_spec.yaml', - '--cephadm-ssh-user', 'ceph-admin', - '--stack', 'overcloud', - '--fsid', 'invalid_fsid'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - -class TestOvercloudCephUserEnable(fakes.FakePlaybookExecution): - def setUp(self): - super(TestOvercloudCephUserEnable, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_ceph.OvercloudCephUserEnable(self.app, - app_args) - - @mock.patch('tripleoclient.utils.parse_ansible_inventory', - autospec=True, return_value=['ceph0', 'ceph1', 'compute0']) - @mock.patch('tripleoclient.utils.get_host_groups_from_ceph_spec', - autospec=True, return_value={'_admin': ['ceph0'], - 'non_admin': ['ceph1']}) - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_ceph_user_enable_no_fsid(self, mock_playbook, mock_abspath, - mock_path_exists, mock_tempdirs, - 
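# test_ceph_user_disable_invalid_fsid above implies --fsid is validated up
# front and a bad value becomes an osc_lib CommandError before any playbook
# runs. The natural check treats the FSID as a UUID; a sketch under that
# assumption:
import uuid

from osc_lib import exceptions as osc_lib_exc


def validate_fsid(fsid):
    try:
        uuid.UUID(fsid)
    except ValueError:
        raise osc_lib_exc.CommandError('Invalid FSID: %s' % fsid)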
mock_get_host_groups_from_ceph_spec, - mock_parse_ansible_inventory): - arglist = ['ceph_spec.yaml', - '--cephadm-ssh-user', 'ceph-admin', - '--stack', 'overcloud'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - # only passes if the call is the most recent one - mock_playbook.assert_called_with( - playbook='ceph-admin-user-playbook.yml', - inventory=mock.ANY, - limit_hosts='ceph1,undercloud', - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - reproduce_command=False, - extra_vars={ - "tripleo_admin_user": 'ceph-admin', - "distribute_private_key": False, - } - ) - - @mock.patch('tripleoclient.utils.parse_ansible_inventory', - autospec=True) - @mock.patch('tripleoclient.utils.get_host_groups_from_ceph_spec', - autospec=True) - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_ceph_user_enable_fsid(self, mock_playbook, mock_abspath, - mock_path_exists, mock_tempdirs, - mock_get_host_groups_from_ceph_spec, - mock_parse_ansible_inventory): - arglist = ['ceph_spec.yaml', - '--cephadm-ssh-user', 'ceph-admin', - '--stack', 'overcloud', - '--fsid', '7bdfa1a6-d606-562c-bbf7-05f17c35763e'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - # ceph-admin-user-playbook.yml is not called when - # get_host_groups_from_ceph_spec returns empty lists - # that use case is covered in test_ceph_user_enable_no_fsid - mock_playbook.assert_called_with( - playbook='disable_cephadm.yml', - inventory=mock.ANY, - limit_hosts=mock.ANY, - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - reproduce_command=False, - extra_vars={ - "tripleo_cephadm_fsid": '7bdfa1a6-d606-562c-bbf7-05f17c35763e', - "tripleo_cephadm_backend": 'cephadm', - "tripleo_cephadm_action": 'enable' - } - ) - - -class TestOvercloudCephSpec(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudCephSpec, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_ceph.OvercloudCephSpec(self.app, - app_args) - - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_ceph_spec(self, mock_playbook, mock_abspath, - mock_path_exists, mock_tempdirs): - arglist = ['deployed-metal.yaml', '--yes', - '--stack', 'overcloud', - '--roles-data', 'roles_data.yaml', - '--osd-spec', 'osd_spec.yaml', - '--output', 'ceph_spec.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - playbook='cli-deployed-ceph.yaml', - inventory=mock.ANY, - workdir=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - tags='ceph_spec', - reproduce_command=False, - rotate_log=True, - extra_vars={ - "baremetal_deployed_path": mock.ANY, - 'tripleo_roles_path': mock.ANY, - 'osd_spec_path': mock.ANY, - 'ceph_spec_path': mock.ANY, - } - ) diff --git a/tripleoclient/tests/v2/overcloud_delete/__init__.py b/tripleoclient/tests/v2/overcloud_delete/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/overcloud_delete/test_overcloud_delete.py 
b/tripleoclient/tests/v2/overcloud_delete/test_overcloud_delete.py deleted file mode 100644 index e2cd21ac2..000000000 --- a/tripleoclient/tests/v2/overcloud_delete/test_overcloud_delete.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import tempfile - -from unittest import mock - -from osc_lib import exceptions - -from tripleoclient import constants -from tripleoclient.tests import fakes -from tripleoclient.tests.v1.overcloud_deploy import fakes as deploy_fakes -from tripleoclient.v2 import overcloud_delete - - -class TestDeleteOvercloud(deploy_fakes.TestDeployOvercloud): - - def setUp(self): - super(TestDeleteOvercloud, self).setUp() - self.app = fakes.FakeApp() - self.cmd = overcloud_delete.DeleteOvercloud(self.app, None) - - @mock.patch("tripleoclient.utils.run_ansible_playbook", autospec=True) - @mock.patch('os.chdir', autospec=True) - @mock.patch('tempfile.mkdtemp', autospec=True) - def test_overcloud_delete(self, mock_mkdir, mock_cd, mock_run_playbook): - arglist = ["overcast", "--heat-type", "native", "-y"] - verifylist = [ - ("stack", "overcast"), - ("heat_type", "native"), - ("yes", True) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - - mock_run_playbook.assert_called_once_with( - ['cli-cleanup-ipa.yml', 'cli-overcloud-delete.yaml'], - constants.ANSIBLE_INVENTORY.format('overcast'), - mock.ANY, - constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - extra_vars={ - "stack_name": "overcast", - }, - verbosity=3, - ) - - @mock.patch("tripleoclient.utils.run_ansible_playbook", autospec=True) - @mock.patch('os.chdir', autospec=True) - @mock.patch('tempfile.mkdtemp', autospec=True) - def test_overcloud_delete_unprovision(self, mock_mkdir, - mock_cd, mock_run_playbook): - arglist = ["overcast", "-y", - '--network-ports'] - verifylist = [ - ("stack", "overcast"), - ("yes", True), - ("network_ports", True) - ] - - with tempfile.NamedTemporaryFile() as inp: - inp.write(b'- name: Compute\n- name: Controller\n') - inp.flush() - arglist.extend(['-b', inp.name]) - verifylist.append(('baremetal_deployment', inp.name)) - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_run_playbook.assert_called_with( - 'cli-overcloud-node-unprovision.yaml', - 'localhost,', - mock.ANY, - constants.ANSIBLE_TRIPLEO_PLAYBOOKS.format('overcast'), - extra_vars={ - "stack_name": "overcast", - "baremetal_deployment": mock.ANY, - "all": True, - "prompt": False, - "manage_network_ports": True, - }, - verbosity=3, - ) - self.assertEqual(mock_run_playbook.call_count, 2) - - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_delete_network_unprovision(self, mock_run_playbook, - mock_path_exists, - mock_abspath, mock_tempdirs): - arglist = ["overcast", "-y", - 
"--networks-file", "network_data_v2.yaml"] - verifylist = [ - ("stack", "overcast"), - ("yes", True), - ("networks_file", "network_data_v2.yaml") - ] - mock_abspath.side_effect = ['/test/network_data_v2.yaml', - '/test/network_data_v2.yaml'] - mock_path_exists.side_effect = [True] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_run_playbook.assert_called_with( - workdir=mock.ANY, - playbook='cli-overcloud-network-unprovision.yaml', - inventory=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - "network_data_path": '/test/network_data_v2.yaml' - } - ) - self.assertEqual(mock_run_playbook.call_count, 2) - - def test_no_confirmation(self): - arglist = ["overcast", ] - verifylist = [ - ("stack", "overcast"), - ("yes", False), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) - - @mock.patch("tripleoclient.utils.run_ansible_playbook", autospec=True) - def test_skip_ipa_cleanup(self, mock_run_playbook): - arglist = ["overcast", "-y", "--skip-ipa-cleanup"] - verifylist = [ - ("stack", "overcast"), - ("yes", True), - ("skip_ipa_cleanup", True) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - mock_run_playbook.assert_called_once_with( - ['cli-overcloud-delete.yaml'], - constants.ANSIBLE_INVENTORY.format('overcast'), - mock.ANY, - constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - extra_vars={ - "stack_name": "overcast", - }, - verbosity=3, - ) diff --git a/tripleoclient/tests/v2/overcloud_network/__init__.py b/tripleoclient/tests/v2/overcloud_network/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/overcloud_network/test_overcloud_network.py b/tripleoclient/tests/v2/overcloud_network/test_overcloud_network.py deleted file mode 100644 index 575d80847..000000000 --- a/tripleoclient/tests/v2/overcloud_network/test_overcloud_network.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from unittest import mock - -from osc_lib import exceptions as osc_lib_exc - -from tripleoclient import constants -from tripleoclient.tests import fakes -from tripleoclient.v2 import overcloud_network - - -class TestOvercloudNetworkExtract(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudNetworkExtract, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_network.OvercloudNetworkExtract(self.app, - app_args) - - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_network_extract(self, mock_playbook, mock_abspath, - mock_tempdirs): - mock_abspath.return_value = '/test/test' - arglist = ['--stack', 'overcloud', '--output', 'test', '--yes'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-network-extract.yaml', - inventory=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - "stack_name": 'overcloud', - "output": '/test/test', - "overwrite": True - } - ) - - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - def test_overcloud_network_extract_no_overwrite(self, mock_abspath, - mock_path_exists): - mock_abspath.return_value = '/test/test' - mock_path_exists.return_value = True - arglist = ['--stack', 'overcloud', '--output', 'test'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - -class TestOvercloudNetworkProvision(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudNetworkProvision, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_network.OvercloudNetworkProvision(self.app, None) - self.cmd.app_args = mock.Mock(verbose_level=1) - - # Mock copy to working dir - mock_copy_to_wd = mock.patch( - 'tripleoclient.utils.copy_to_wd', autospec=True) - mock_copy_to_wd.start() - self.addCleanup(mock_copy_to_wd.stop) - - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_network_provision(self, mock_playbook, mock_path_exists, - mock_abspath, mock_tempdirs): - arglist = ['--output', 'deployed_networks.yaml', '--yes', - 'network_data_v2.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - - mock_abspath.side_effect = ['/test/network_data_v2.yaml', - '/test/deployed_networks.yaml'] - mock_path_exists.side_effect = [True, True] - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-network-provision.yaml', - inventory=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - "network_data_path": '/test/network_data_v2.yaml', - "network_deployed_path": '/test/deployed_networks.yaml', - "overwrite": True, - 'templates': constants.TRIPLEO_HEAT_TEMPLATES, - } - ) - - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - def test_overcloud_network_extract_no_overwrite(self, 
mock_abspath, - mock_path_exists): - arglist = ['--output', 'deployed_networks.yaml', 'network-data.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - - mock_abspath.side_effect = ['/test/network_data_v2.yaml', - '/test/deployed_networks.yaml'] - mock_path_exists.side_effect = [True, True] - - self.assertRaises(osc_lib_exc.CommandError, - self.cmd.take_action, parsed_args) - - -class TestOvercloudNetworkUnprovision(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudNetworkUnprovision, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_network.OvercloudNetworkUnprovision(self.app, - None) - self.cmd.app_args = mock.Mock(verbose_level=1) - - @mock.patch('tripleoclient.utils.TempDirs', autospec=True) - @mock.patch('os.path.abspath', autospec=True) - @mock.patch('os.path.exists', autospec=True) - @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) - def test_overcloud_network_unprovision(self, mock_playbook, - mock_path_exists, - mock_abspath, mock_tempdirs): - arglist = ['--yes', 'network_data_v2.yaml'] - parsed_args = self.check_parser(self.cmd, arglist, []) - - mock_abspath.side_effect = ['/test/network_data_v2.yaml'] - mock_path_exists.side_effect = [True] - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-overcloud-network-unprovision.yaml', - inventory=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - "network_data_path": '/test/network_data_v2.yaml' - } - ) diff --git a/tripleoclient/tests/v2/overcloud_node/__init__.py b/tripleoclient/tests/v2/overcloud_node/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/overcloud_node/fakes.py b/tripleoclient/tests/v2/overcloud_node/fakes.py deleted file mode 100644 index 808d082d0..000000000 --- a/tripleoclient/tests/v2/overcloud_node/fakes.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import uuid - -from tripleoclient.tests import fakes - - -class TestDeleteNode(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestDeleteNode, self).setUp() - - -class TestOvercloudNode(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestOvercloudNode, self).setUp() - - -def make_fake_machine(machine_name, provision_state, - is_maintenance, machine_id=None): - if not machine_id: - machine_id = uuid.uuid4().hex - return(fakes.FakeMachine(id=machine_id, name=machine_name, - provision_state=provision_state, - is_maintenance=is_maintenance)) diff --git a/tripleoclient/tests/v2/overcloud_node/test_overcloud_node.py b/tripleoclient/tests/v2/overcloud_node/test_overcloud_node.py deleted file mode 100644 index 2d596ca01..000000000 --- a/tripleoclient/tests/v2/overcloud_node/test_overcloud_node.py +++ /dev/null @@ -1,577 +0,0 @@ -# Copyright 2015 Red Hat, Inc. 
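The deleted fakes.py just above gives make_fake_machine a generated default: when no machine_id is passed, it mints one with uuid.uuid4().hex inside the function body. A standalone sketch of that factory pattern (FakeMachine is replaced here by a plain namedtuple purely for illustration; it is not the real fakes.FakeMachine):

import uuid
from collections import namedtuple

FakeMachine = namedtuple(
    'FakeMachine', ['id', 'name', 'provision_state', 'is_maintenance'])

def make_fake_machine(name, provision_state, is_maintenance,
                      machine_id=None):
    # Mint a fresh hex UUID only when the caller did not supply one;
    # generated defaults belong in the body, not in the signature,
    # or every call would share the value computed at def-time.
    if not machine_id:
        machine_id = uuid.uuid4().hex
    return FakeMachine(id=machine_id, name=name,
                       provision_state=provision_state,
                       is_maintenance=is_maintenance)

node = make_fake_machine('node1', 'manageable', False)
assert len(node.id) == 32  # uuid4().hex is 32 hex characters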
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import collections -import fixtures -import json -import os -import tempfile -from unittest import mock - -import openstack -from osc_lib.tests import utils as test_utils - -from tripleoclient import constants -from tripleoclient.tests.v2.overcloud_node import fakes -from tripleoclient.v2 import overcloud_node -from tripleoclient.workflows import tripleo_baremetal as tb - - -@mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) -@mock.patch.object(openstack.baremetal.v1._proxy, 'Proxy', autospec=True, - name="mock_bm") -@mock.patch('openstack.config', autospec=True, name='mock_conf') -@mock.patch('openstack.connect', autospec=True, name='mock_connect') -@mock.patch.object(openstack.connection, 'Connection', autospec=True) -class TestImportNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestImportNode, self).setUp() - self.nodes_list = [{ - "pm_user": "stack", - "pm_addr": "192.168.122.1", - "pm_password": "KEY1", - "pm_type": "pxe_ssh", - "mac": [ - "00:0b:d0:69:7e:59" - ], - }, { - "pm_user": "stack", - "pm_addr": "192.168.122.2", - "pm_password": "KEY2", - "pm_type": "pxe_ssh", - "mac": [ - "00:0b:d0:69:7e:58" - ] - }] - - self.fake_baremetal_node = fakes.make_fake_machine( - machine_name='node1', - machine_id='4e540e11-1366-4b57-85d5-319d168d98a1', - provision_state='manageable', - is_maintenance=False - ) - self.fake_baremetal_node2 = fakes.make_fake_machine( - machine_name='node2', - machine_id='9070e42d-1ad7-4bd0-b868-5418bc9c7176', - provision_state='manageable', - is_maintenance=False - ) - self.json_file = tempfile.NamedTemporaryFile( - mode='w', delete=False, suffix='.json') - json.dump(self.nodes_list, self.json_file) - self.json_file.close() - self.addCleanup(os.unlink, self.json_file.name) - - # Get the command object to test - self.cmd = overcloud_node.ImportNode(self.app, None) - - image = collections.namedtuple('image', ['id', 'name']) - self.app.client_manager.image = mock.Mock() - self.app.client_manager.image.images.list.return_value = [ - image(id=3, name='overcloud-full'), - ] - - self.http_boot = '/var/lib/ironic/httpboot' - - self.useFixture(fixtures.MockPatch( - 'os.path.exists', autospec=True, - side_effect=lambda path: path in [os.path.join(self.http_boot, i) - for i in ('agent.kernel', - 'agent.ramdisk')])) - - def test_import_only(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - [self.json_file.name], - [('introspect', False), - ('provide', False)]) - self.cmd.take_action(parsed_args) - - def test_import_and_introspect(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - [self.json_file.name, - '--introspect'], - [('introspect', True), - ('provide', False)]) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - 
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=mock.ANY, - extra_vars={ - 'node_uuids': ['MOCK_NODE_UUID'], - 'run_validations': False, - 'concurrency': 20 - } - ) - - def test_import_and_provide(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - [self.json_file.name, - '--provide'], - [('introspect', False), - ('provide', True)]) - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal_introspection = mock_bm - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2] - - self.cmd.take_action(parsed_args) - - def test_import_and_introspect_and_provide(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - [self.json_file.name, - '--introspect', - '--provide'], - [('introspect', True), - ('provide', True)]) - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal_introspection = mock_bm - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2] - - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=mock.ANY, - extra_vars={ - 'node_uuids': ['MOCK_NODE_UUID'], - 'run_validations': False, - 'concurrency': 20 - } - ) - - def test_import_with_netboot(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - [self.json_file.name, - '--no-deploy-image'], - [('no_deploy_image', True)]) - self.cmd.take_action(parsed_args) - - def test_import_with_no_deployed_image(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - [self.json_file.name, - '--instance-boot-option', - 'netboot'], - [('instance_boot_option', 'netboot')]) - self.cmd.take_action(parsed_args) - - -@mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) -@mock.patch.object(openstack.baremetal.v1._proxy, 'Proxy', autospec=True, - name="mock_bm") -@mock.patch('openstack.config', autospec=True, name='mock_conf') -@mock.patch('openstack.connect', autospec=True, name='mock_connect') -@mock.patch.object(openstack.connection, 'Connection', autospec=True) -class TestIntrospectNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestIntrospectNode, self).setUp() - # Get the command object to test - self.cmd = overcloud_node.IntrospectNode(self.app, None) - self.fake_baremetal_node = fakes.make_fake_machine( - machine_name='node1', - machine_id='4e540e11-1366-4b57-85d5-319d168d98a1', - provision_state='manageable', - is_maintenance=False - ) - self.fake_baremetal_node2 = fakes.make_fake_machine( - machine_name='node2', - machine_id='9070e42d-1ad7-4bd0-b868-5418bc9c7176', - provision_state='manageable', - is_maintenance=False - ) - - def test_introspect_all_manageable_nodes_without_provide(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - ['--all-manageable'], - [('all_manageable', True)]) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook=mock.ANY, - inventory=mock.ANY, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=mock.ANY, - extra_vars={ - 'node_uuids': [], - 'run_validations': False, - 'concurrency': 20, - 
'node_timeout': 1200, - 'max_retries': 1, - 'retry_timeout': 120, - } - ) - - def test_introspect_all_manageable_nodes_with_provide(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - parsed_args = self.check_parser(self.cmd, - ['--all-manageable', '--provide'], - [('all_manageable', True), - ('provide', True)]) - tb.TripleoProvide.provide = mock.MagicMock() - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal.nodes.side_effect = [ - iter([self.fake_baremetal_node, - self.fake_baremetal_node2]), - iter([self.fake_baremetal_node, - self.fake_baremetal_node2]) - ] - - expected_nodes = ['4e540e11-1366-4b57-85d5-319d168d98a1', - '9070e42d-1ad7-4bd0-b868-5418bc9c7176'] - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_with( - workdir=mock.ANY, - playbook='cli-baremetal-introspect.yaml', - inventory=mock.ANY, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=mock.ANY, - extra_vars={ - 'node_uuids': [], - 'run_validations': False, - 'concurrency': 20, - 'node_timeout': 1200, - 'max_retries': 1, - 'retry_timeout': 120, - } - ) - - tb.TripleoProvide.provide.assert_called_with( - expected_nodes) - - def test_introspect_nodes_without_provide(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - nodes = ['node_uuid1', 'node_uuid2'] - parsed_args = self.check_parser(self.cmd, - nodes, - [('node_uuids', nodes)]) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-baremetal-introspect.yaml', - inventory=mock.ANY, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=mock.ANY, - extra_vars={ - 'node_uuids': nodes, - 'run_validations': False, - 'concurrency': 20, - 'node_timeout': 1200, - 'max_retries': 1, - 'retry_timeout': 120, - } - ) - - def test_introspect_nodes_with_provide(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - nodes = ['node1', 'node2'] - argslist = nodes + ['--provide'] - parsed_args = self.check_parser(self.cmd, - argslist, - [('node_uuids', nodes), - ('provide', True)]) - tb.TripleoProvide.provide = mock.MagicMock() - mock_conn.return_value = mock_bm - mock_bm.baremetal = mock_bm - mock_bm.baremetal_introspection = mock_bm - mock_bm.baremetal.get_node.side_effect = [ - self.fake_baremetal_node, - self.fake_baremetal_node2] - - self.cmd.take_action(parsed_args) - - tb.TripleoProvide.provide.assert_called_with( - nodes=nodes - ) - - def test_introspect_no_node_or_flag_specified(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - self.assertRaises(test_utils.ParserException, - self.check_parser, - self.cmd, [], []) - - def test_introspect_uuids_and_all_both_specified(self, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook): - argslist = ['node_id1', 'node_id2', '--all-manageable'] - verifylist = [('node_uuids', ['node_id1', 'node_id2']), - ('all_manageable', True)] - self.assertRaises(test_utils.ParserException, - self.check_parser, - self.cmd, argslist, verifylist) - - def _check_introspect_all_manageable(self, parsed_args, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook, provide=False): - self.cmd.take_action(parsed_args) - - call_list = [mock.call( - 'tripleo.baremetal.v1.introspect_manageable_nodes', - workflow_input={'run_validations': False, 'concurrency': 20} - )] - - if provide: - call_list.append(mock.call( - 'tripleo.baremetal.v1.provide_manageable_nodes', - workflow_input={} - )) - - 
self.workflow.executions.create.assert_has_calls(call_list) - self.assertEqual(self.workflow.executions.create.call_count, - 2 if provide else 1) - - def _check_introspect_nodes(self, parsed_args, nodes, - mock_conn, - mock_connect, - mock_conf, - mock_bm, - mock_playbook, provide=False): - self.cmd.take_action(parsed_args) - - call_list = [mock.call( - 'tripleo.baremetal.v1.introspect', workflow_input={ - 'node_uuids': nodes, - 'run_validations': False, - 'concurrency': 20} - )] - - if provide: - call_list.append(mock.call( - 'tripleo.baremetal.v1.provide', workflow_input={ - 'node_uuids': nodes} - )) - - self.workflow.executions.create.assert_has_calls(call_list) - self.assertEqual(self.workflow.executions.create.call_count, - 2 if provide else 1) - - -class TestProvisionNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestProvisionNode, self).setUp() - self.cmd = overcloud_node.ProvisionNode(self.app, None) - self.cmd.app_args = mock.Mock(verbose_level=1) - - # Mock copy to working dir - mock_copy_to_wd = mock.patch( - 'tripleoclient.utils.copy_to_wd', autospec=True) - mock_copy_to_wd.start() - self.addCleanup(mock_copy_to_wd.stop) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - @mock.patch('tripleoclient.utils.run_role_playbooks', - autospec=True) - def test_ok(self, mock_role_playbooks, mock_playbook): - with tempfile.NamedTemporaryFile() as inp: - with tempfile.NamedTemporaryFile() as outp: - with tempfile.NamedTemporaryFile() as keyf: - inp.write(b'- name: Compute\n- name: Controller\n') - inp.flush() - keyf.write(b'I am a key') - keyf.flush() - with open('{}.pub'.format(keyf.name), 'w') as f: - f.write('I am a key') - key_file_name = keyf.name - - argslist = ['--output', outp.name, - '--overcloud-ssh-key', keyf.name, - '--yes', - inp.name] - verifylist = [('input', inp.name), - ('output', outp.name), - ('overcloud_ssh_key', keyf.name), - ('yes', True)] - - parsed_args = self.check_parser(self.cmd, - argslist, verifylist) - self.cmd.take_action(parsed_args) - - mock_playbook.assert_called_once_with( - extra_vars={ - 'stack_name': 'overcloud', - 'baremetal_deployment': [ - {'name': 'Compute'}, - {'name': 'Controller'} - ], - 'baremetal_deployed_path': mock.ANY, - 'ssh_public_keys': 'I am a key', - 'ssh_user_name': 'tripleo-admin', - 'ssh_private_key_file': key_file_name, - 'node_timeout': 3600, - 'concurrency': 20, - 'manage_network_ports': True, - 'configure_networking': False, - 'working_dir': mock.ANY, - 'templates': constants.TRIPLEO_HEAT_TEMPLATES, - 'overwrite': True, - }, - inventory='localhost,', - playbook='cli-overcloud-node-provision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=mock.ANY, - workdir=mock.ANY - ) - mock_role_playbooks.assert_called_once_with( - self.cmd, - mock.ANY, - '/tmp', - [ - {'name': 'Compute'}, - {'name': 'Controller'} - ], - False - ) - - -class TestUnprovisionNode(fakes.TestOvercloudNode): - - def setUp(self): - super(TestUnprovisionNode, self).setUp() - self.cmd = overcloud_node.UnprovisionNode(self.app, None) - self.cmd.app_args = mock.Mock(verbose_level=1) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - @mock.patch('tripleoclient.utils.tempfile') - @mock.patch('tripleoclient.utils.prompt_user_for_confirmation') - def test_ok(self, mock_prompt, mock_tempfile, mock_playbook): - tmp = tempfile.mkdtemp() - mock_tempfile.mkdtemp.return_value = tmp - mock_prompt.return_value = True - unprovision_confirm = os.path.join(tmp, 'unprovision_confirm.json') - 
with open(unprovision_confirm, 'w') as confirm: - confirm.write(json.dumps([ - {'hostname': 'compute-0', 'name': 'baremetal-1'}, - {'hostname': 'controller-0', 'name': 'baremetal-2'} - ])) - - with tempfile.NamedTemporaryFile() as inp: - inp.write(b'- name: Compute\n- name: Controller\n') - inp.flush() - argslist = ['--all', inp.name] - verifylist = [('input', inp.name), ('all', True)] - - parsed_args = self.check_parser(self.cmd, - argslist, verifylist) - self.cmd.take_action(parsed_args) - mock_playbook.assert_has_calls([ - mock.call( - extra_vars={ - 'stack_name': 'overcloud', - 'baremetal_deployment': [ - {'name': 'Compute'}, - {'name': 'Controller'} - ], - 'all': True, - 'prompt': True, - 'unprovision_confirm': unprovision_confirm, - 'manage_network_ports': True, - }, - inventory='localhost,', - playbook='cli-overcloud-node-unprovision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=mock.ANY, - workdir=tmp, - ), - mock.call( - extra_vars={ - 'stack_name': 'overcloud', - 'baremetal_deployment': [ - {'name': 'Compute'}, - {'name': 'Controller'} - ], - 'all': True, - 'prompt': False, - 'manage_network_ports': True, - }, - inventory='localhost,', - playbook='cli-overcloud-node-unprovision.yaml', - playbook_dir='/usr/share/ansible/tripleo-playbooks', - verbosity=mock.ANY, - workdir=tmp - ) - ]) diff --git a/tripleoclient/tests/v2/overcloud_support/__init__.py b/tripleoclient/tests/v2/overcloud_support/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/v2/overcloud_support/test_overcloud_support.py b/tripleoclient/tests/v2/overcloud_support/test_overcloud_support.py deleted file mode 100644 index 498ed6400..000000000 --- a/tripleoclient/tests/v2/overcloud_support/test_overcloud_support.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
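The unprovision test above verifies two back-to-back playbook runs with assert_has_calls. A standalone sketch of how that mock assertion behaves, using a bare Mock rather than anything from tripleoclient:

from unittest import mock

run = mock.Mock()
run(playbook='cli-overcloud-node-unprovision.yaml', prompt=True)
run(playbook='cli-overcloud-node-unprovision.yaml', prompt=False)

# With the default any_order=False, the listed calls must appear
# consecutively and in this order; unrelated calls may only come
# before or after the matched sequence, not between its members.
run.assert_has_calls([
    mock.call(playbook='cli-overcloud-node-unprovision.yaml', prompt=True),
    mock.call(playbook='cli-overcloud-node-unprovision.yaml', prompt=False),
])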
-# - -import os -from unittest import mock - -from osc_lib.tests import utils - -from tripleoclient import constants -from tripleoclient.tests import fakes -from tripleoclient.v2 import overcloud_support - - -class TestOvercloudSupportReport(utils.TestCommand): - - def setUp(self): - super(TestOvercloudSupportReport, self).setUp() - - # Get the command object to test - app_args = mock.Mock() - app_args.verbose_level = 1 - self.app.options = fakes.FakeOptions() - self.cmd = overcloud_support.ReportExecute(self.app, app_args) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_support_noargs(self, mock_playbook): - parsed_args = self.check_parser(self.cmd, ['all'], []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-support-collect-logs.yaml', - inventory=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - 'server_name': 'all', - 'sos_destination': '/var/lib/tripleo/support' - } - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_support_args(self, mock_playbook): - arglist = ['server1', '--output', 'test'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-support-collect-logs.yaml', - inventory=mock.ANY, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - 'server_name': 'server1', - 'sos_destination': 'test' - } - ) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_overcloud_support_args_stack(self, mock_playbook): - arglist = ['server1', '--output', 'test', '--stack', 'notovercloud'] - parsed_args = self.check_parser(self.cmd, arglist, []) - self.cmd.take_action(parsed_args) - inv = os.path.join( - constants.DEFAULT_WORK_DIR, - 'notovercloud/tripleo-ansible-inventory.yaml' - ) - mock_playbook.assert_called_once_with( - workdir=mock.ANY, - playbook='cli-support-collect-logs.yaml', - inventory=inv, - playbook_dir=mock.ANY, - verbosity=3, - extra_vars={ - 'server_name': 'server1', - 'sos_destination': 'test' - } - ) diff --git a/tripleoclient/tests/workflows/__init__.py b/tripleoclient/tests/workflows/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/tests/workflows/test_baremetal.py b/tripleoclient/tests/workflows/test_baremetal.py deleted file mode 100644 index 8e5f0000b..000000000 --- a/tripleoclient/tests/workflows/test_baremetal.py +++ /dev/null @@ -1,429 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
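Nearly every deleted test above stacks several @mock.patch decorators and receives the patched objects as positional arguments. The ordering rule they all rely on, shown standalone (patching stdlib targets only, purely for illustration):

import os
from unittest import mock

@mock.patch('os.path.abspath', autospec=True)   # outermost decorator
@mock.patch('os.path.exists', autospec=True)    # innermost decorator
def check(mock_exists, mock_abspath):
    # Decorators apply bottom-up, so the innermost patch binds to the
    # first parameter: argument order is the reverse of decorator order.
    mock_exists.return_value = True
    mock_abspath.return_value = '/test/test'
    assert os.path.exists('anything') is True
    assert os.path.abspath('x') == '/test/test'

check()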
- -import netaddr -from unittest import mock - -import ironic_inspector_client -from oslo_concurrency import processutils -from oslo_utils import units - -from tripleoclient import exceptions -from tripleoclient.tests import fakes -from tripleoclient.workflows import baremetal - - -class TestBaremetalWorkflows(fakes.FakePlaybookExecution): - - def setUp(self): - super(TestBaremetalWorkflows, self).setUp() - self.glance = self.app.client_manager.image = mock.Mock() - self.tripleoclient = mock.Mock() - self.app.client_manager.tripleoclient = self.tripleoclient - self.mock_playbook = mock.patch( - 'tripleoclient.utils.run_ansible_playbook', - autospec=True - ) - self.mock_playbook.start() - self.addCleanup(self.mock_playbook.stop) - - self.node_update = [{'op': 'add', - 'path': '/properties/capabilities', - 'value': 'boot_option:local'}, - {'op': 'add', - 'path': '/driver_info/deploy_ramdisk', - 'value': None}, - {'op': 'add', - 'path': '/driver_info/deploy_kernel', - 'value': None}, - {'op': 'add', - 'path': '/driver_info/rescue_ramdisk', - 'value': None}, - {'op': 'add', - 'path': '/driver_info/rescue_kernel', - 'value': None}] - # Mock data - self.disks = [ - {'name': '/dev/sda', 'size': 11 * units.Gi}, - {'name': '/dev/sdb', 'size': 2 * units.Gi}, - {'name': '/dev/sdc', 'size': 5 * units.Gi}, - {'name': '/dev/sdd', 'size': 21 * units.Gi}, - {'name': '/dev/sde', 'size': 13 * units.Gi}, - ] - for i, disk in enumerate(self.disks): - disk['wwn'] = 'wwn%d' % i - disk['serial'] = 'serial%d' % i - self.baremetal.node.list.return_value = [ - mock.Mock(uuid="ABCDEFGH"), - ] - - self.node = mock.Mock(uuid="ABCDEFGH", properties={}) - self.baremetal.node.get.return_value = self.node - self.inspector.get_data.return_value = { - 'inventory': {'disks': self.disks} - } - self.existing_nodes = [ - {'uuid': '1', 'driver': 'ipmi', - 'driver_info': {'ipmi_address': '10.0.0.1'}}, - {'uuid': '2', 'driver': 'pxe_ipmitool', - 'driver_info': {'ipmi_address': '10.0.0.1', 'ipmi_port': 6235}}, - {'uuid': '3', 'driver': 'foobar', 'driver_info': {}}, - {'uuid': '4', 'driver': 'fake', - 'driver_info': {'fake_address': 42}}, - {'uuid': '5', 'driver': 'ipmi', 'driver_info': {}}, - {'uuid': '6', 'driver': 'pxe_drac', - 'driver_info': {'drac_address': '10.0.0.2'}}, - {'uuid': '7', 'driver': 'pxe_drac', - 'driver_info': {'drac_address': '10.0.0.3', 'drac_port': 6230}}, - ] - - def test_register_or_update_success(self): - self.assertEqual(baremetal.register_or_update( - self.app.client_manager, - nodes_json=[], - instance_boot_option='local' - ), [mock.ANY]) - - def test_introspect_success(self): - baremetal.introspect(self.app.client_manager, node_uuids=[], - run_validations=True, concurrency=20, - node_timeout=1200, max_retries=1, - retry_timeout=120) - - def test_introspect_manageable_nodes_success(self): - baremetal.introspect_manageable_nodes( - self.app.client_manager, run_validations=False, concurrency=20, - node_timeout=1200, max_retries=1, retry_timeout=120, - ) - - def test_run_instance_boot_option(self): - result = baremetal._configure_boot( - self.app.client_manager, - node_uuid='MOCK_UUID', - instance_boot_option='netboot') - self.assertIsNone(result) - self.node_update[0].update({'value': 'boot_option:netboot'}) - self.baremetal.node.update.assert_called_once_with( - mock.ANY, self.node_update) - - def test_run_instance_boot_option_not_set(self): - result = baremetal._configure_boot( - self.app.client_manager, - node_uuid='MOCK_UUID') - self.assertIsNone(result) - self.node_update[0].update({'value': ''}) - 
self.baremetal.node.update.assert_called_once_with( - mock.ANY, self.node_update) - - def test_run_instance_boot_option_already_set_no_overwrite(self): - node_mock = mock.MagicMock() - node_mock.properties.get.return_value = ({'boot_option': 'netboot'}) - self.app.client_manager.baremetal.node.get.return_value = node_mock - - result = baremetal._configure_boot( - self.app.client_manager, - node_uuid='MOCK_UUID') - self.assertIsNone(result) - self.node_update[0].update({'value': 'boot_option:netboot'}) - self.baremetal.node.update.assert_called_once_with( - mock.ANY, self.node_update) - - def test_run_instance_boot_option_already_set_do_overwrite(self): - node_mock = mock.MagicMock() - node_mock.properties.get.return_value = ({'boot_option': 'netboot'}) - self.app.client_manager.baremetal.node.get.return_value = node_mock - result = baremetal._configure_boot( - self.app.client_manager, - node_uuid='MOCK_UUID', - instance_boot_option='local') - self.assertIsNone(result) - self.node_update[0].update({'value': 'boot_option:local'}) - self.baremetal.node.update.assert_called_once_with( - mock.ANY, self.node_update) - - def test_run_exception_on_node_update(self): - self.baremetal.node.update.side_effect = Exception("Update error") - self.assertRaises( - Exception, - baremetal._configure_boot, - self.app.client_manager, - node_uuid='MOCK_UUID') - - self.inspector.get_data.return_value = { - 'inventory': {'disks': self.disks} - } - - def test_smallest(self): - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn': 'wwn2'}}, - {'op': 'add', 'path': '/properties/local_gb', - 'value': 4}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_smallest_with_ext(self): - self.disks[2]['wwn_with_extension'] = 'wwnext' - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn_with_extension': 'wwnext'}}, - {'op': 'add', 'path': '/properties/local_gb', - 'value': 4}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_largest(self): - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='largest') - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn': 'wwn3'}}, - {'op': 'add', 'path': '/properties/local_gb', - 'value': 20}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_largest_with_ext(self): - self.disks[3]['wwn_with_extension'] = 'wwnext' - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='largest') - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn_with_extension': 'wwnext'}}, - {'op': 'add', 'path': '/properties/local_gb', - 
'value': 20}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_no_overwrite(self): - self.node.properties['root_device'] = {'foo': 'bar'} - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_with_overwrite(self): - self.node.properties['root_device'] = {'foo': 'bar'} - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest', - overwrite=True) - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn': 'wwn2'}}, - {'op': 'add', 'path': '/properties/local_gb', - 'value': 4}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_minimum_size(self): - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest', - minimum_size=10) - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn': 'wwn0'}}, - {'op': 'add', 'path': '/properties/local_gb', - 'value': 10}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_bad_inventory(self): - self.inspector.get_data.return_value = {} - self.assertRaisesRegex(exceptions.RootDeviceDetectionError, - "Malformed introspection data", - baremetal._apply_root_device_strategy, - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_no_disks(self): - self.inspector.get_data.return_value = { - 'inventory': { - 'disks': [{'name': '/dev/sda', 'size': 1 * units.Gi}] - } - } - - self.assertRaisesRegex(exceptions.RootDeviceDetectionError, - "No suitable disks", - baremetal._apply_root_device_strategy, - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_md_device_found(self): - self.inspector.get_data.return_value = { - 'inventory': { - 'disks': [{'name': '/dev/md0', 'size': 99 * units.Gi}, - {'name': '/dev/sda', 'size': 100 * units.Gi}] - } - } - - baremetal._apply_root_device_strategy( - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy=None) - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_no_data(self): - self.inspector.get_data.side_effect = ( - ironic_inspector_client.ClientError(mock.Mock())) - - self.assertRaisesRegex(exceptions.RootDeviceDetectionError, - "No introspection data", - baremetal._apply_root_device_strategy, - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_no_wwn_and_serial(self): - self.inspector.get_data.return_value = { - 'inventory': { - 'disks': [{'name': '/dev/sda', 'size': 10 * units.Gi}] - } - } - - self.assertRaisesRegex(exceptions.RootDeviceDetectionError, - "Neither WWN nor serial number are known", - baremetal._apply_root_device_strategy, - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='smallest') - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_device_list(self): - baremetal._apply_root_device_strategy( - 
self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='hda,sda,sdb,sdc') - self.assertEqual(self.baremetal.node.update.call_count, 1) - root_device_args = self.baremetal.node.update.call_args_list[0] - expected_patch = [{'op': 'add', 'path': '/properties/root_device', - 'value': {'wwn': 'wwn0'}}, - {'op': 'add', 'path': '/properties/local_gb', - 'value': 10}] - self.assertEqual(mock.call('ABCDEFGH', expected_patch), - root_device_args) - - def test_device_list_not_found(self): - self.assertRaisesRegex(exceptions.RootDeviceDetectionError, - "Cannot find a disk", - baremetal._apply_root_device_strategy, - self.app.client_manager, - node_uuid='MOCK_UUID', - strategy='hda') - self.assertEqual(self.baremetal.node.update.call_count, 0) - - def test_existing_ips(self): - result = baremetal._existing_ips(self.existing_nodes) - self.assertEqual({('10.0.0.1', 623), ('10.0.0.1', 6235), - ('10.0.0.2', None), ('10.0.0.3', 6230)}, - set(result)) - - def test_with_list(self): - result = baremetal._get_candidate_nodes( - ['10.0.0.1', '10.0.0.2', '10.0.0.3'], - [623, 6230, 6235], - [['admin', 'password'], ['admin', 'admin']], - self.existing_nodes) - self.assertEqual([ - {'ip': '10.0.0.3', 'port': 623, - 'username': 'admin', 'password': 'password'}, - {'ip': '10.0.0.1', 'port': 6230, - 'username': 'admin', 'password': 'password'}, - {'ip': '10.0.0.3', 'port': 6235, - 'username': 'admin', 'password': 'password'}, - {'ip': '10.0.0.3', 'port': 623, - 'username': 'admin', 'password': 'admin'}, - {'ip': '10.0.0.1', 'port': 6230, - 'username': 'admin', 'password': 'admin'}, - {'ip': '10.0.0.3', 'port': 6235, - 'username': 'admin', 'password': 'admin'}, - ], result) - - def test_with_subnet(self): - result = baremetal._get_candidate_nodes( - '10.0.0.0/30', - [623, 6230, 6235], - [['admin', 'password'], ['admin', 'admin']], - self.existing_nodes) - self.assertEqual([ - {'ip': '10.0.0.1', 'port': 6230, - 'username': 'admin', 'password': 'password'}, - {'ip': '10.0.0.1', 'port': 6230, - 'username': 'admin', 'password': 'admin'}, - ], result) - - def test_invalid_subnet(self): - self.assertRaises( - netaddr.core.AddrFormatError, - baremetal._get_candidate_nodes, - 'meow', - [623, 6230, 6235], - [['admin', 'password'], ['admin', 'admin']], - self.existing_nodes) - - @mock.patch.object(processutils, 'execute', autospec=True) - def test_success(self, mock_execute): - result = baremetal._probe_node('10.0.0.42', 623, - 'admin', 'password') - self.assertEqual({'pm_type': 'ipmi', - 'pm_addr': '10.0.0.42', - 'pm_user': 'admin', - 'pm_password': 'password', - 'pm_port': 623}, - result) - mock_execute.assert_called_once_with('ipmitool', '-I', 'lanplus', - '-H', '10.0.0.42', - '-L', 'ADMINISTRATOR', - '-p', '623', '-U', 'admin', - '-f', mock.ANY, 'power', 'status', - attempts=2) - - @mock.patch.object(processutils, 'execute', autospec=True) - def test_failure(self, mock_execute): - mock_execute.side_effect = processutils.ProcessExecutionError() - self.assertIsNone(baremetal._probe_node('10.0.0.42', 623, - 'admin', 'password')) - mock_execute.assert_called_once_with('ipmitool', '-I', 'lanplus', - '-H', '10.0.0.42', - '-L', 'ADMINISTRATOR', - '-p', '623', '-U', 'admin', - '-f', mock.ANY, 'power', 'status', - attempts=2) diff --git a/tripleoclient/tests/workflows/test_deployment.py b/tripleoclient/tests/workflows/test_deployment.py deleted file mode 100644 index 6a586d2e1..000000000 --- a/tripleoclient/tests/workflows/test_deployment.py +++ /dev/null @@ -1,196 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache 
License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import shutil -import tempfile -from unittest import mock - - -from osc_lib.tests import utils - -from tripleoclient.tests.fakes import FakeStackObject -from tripleoclient.workflows import deployment - - -class TestDeploymentWorkflows(utils.TestCommand): - - def setUp(self): - super(TestDeploymentWorkflows, self).setUp() - self.tripleoclient = mock.Mock() - self.orig_workdir = deployment.DEFAULT_WORK_DIR - deployment.DEFAULT_WORK_DIR = tempfile.mkdtemp() - - def tearDown(self): - super(TestDeploymentWorkflows, self).tearDown() - shutil.rmtree(deployment.DEFAULT_WORK_DIR) - deployment.DEFAULT_WORK_DIR = self.orig_workdir - - @mock.patch('os.path.join') - @mock.patch('shutil.rmtree') - @mock.patch('os.chdir') - @mock.patch('tripleoclient.utils.tempfile') - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_enable_ssh_admin(self, mock_playbook, mock_tempfile, - mock_chdir, mock_rmtree, mock_join): - hosts = 'a', 'b', 'c' - ssh_user = 'test-user' - ssh_key = 'test-key' - timeout = 30 - - deployment.enable_ssh_admin( - FakeStackObject, - hosts, - ssh_user, - ssh_key, - timeout, - mock.Mock() - ) - - # once for ssh-keygen, then twice per host - self.assertEqual(1, mock_playbook.call_count) - - @mock.patch('tripleoclient.utils.get_excluded_ip_addresses') - @mock.patch('tripleoclient.utils.get_role_net_ip_map') - def test_get_overcloud_hosts(self, mock_role_net_ip_map, - mock_excluded_ip_addresses): - stack = mock.Mock() - working_dir = mock.Mock() - # empty string added to Compute ctlplane to test LP 1990566 fix - mock_role_net_ip_map.return_value = { - 'Controller': { - 'ctlplane': ['1.1.1.1', '2.2.2.2', '3.3.3.3'], - 'external': ['4.4.4.4', '5.5.5.5', '6.6.6.6']}, - 'Compute': { - 'ctlplane': ['7.7.7.7', '', '8.8.8.8', '9.9.9.9'], - 'external': ['10.10.10.10', '11.11.11.11', '12.12.12.12']}, - } - mock_excluded_ip_addresses.return_value = [] - - ips = deployment.get_overcloud_hosts(stack, 'ctlplane', working_dir) - expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3', - '7.7.7.7', '8.8.8.8', '9.9.9.9'] - self.assertEqual(sorted(expected), sorted(ips)) - - ips = deployment.get_overcloud_hosts(stack, 'external', working_dir) - expected = ['4.4.4.4', '5.5.5.5', '6.6.6.6', - '10.10.10.10', '11.11.11.11', '12.12.12.12'] - self.assertEqual(sorted(expected), sorted(ips)) - - @mock.patch('tripleoclient.utils.get_excluded_ip_addresses') - @mock.patch('tripleoclient.utils.get_role_net_ip_map') - def test_get_overcloud_hosts_with_exclude( - self, mock_role_net_ip_map, - mock_excluded_ip_addresses): - stack = mock.Mock() - working_dir = mock.Mock() - stack.output_show.return_value = [] - mock_role_net_ip_map.return_value = { - 'Controller': { - 'ctlplane': ['1.1.1.1', '2.2.2.2', '3.3.3.3'], - 'external': ['4.4.4.4', '5.5.5.5', '6.6.6.6']}, - 'Compute': { - 'ctlplane': ['7.7.7.7', '8.8.8.8', '9.9.9.9'], - 'external': ['10.10.10.10', '11.11.11.11', '12.12.12.12']}, - } - - mock_excluded_ip_addresses.return_value = ['8.8.8.8'] - ips = 
deployment.get_overcloud_hosts(stack, 'ctlplane', working_dir) - expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3', - '7.7.7.7', '9.9.9.9'] - self.assertEqual(sorted(expected), sorted(ips)) - - ips = deployment.get_overcloud_hosts(stack, 'external', working_dir) - expected = ['4.4.4.4', '5.5.5.5', '6.6.6.6', - '10.10.10.10', '12.12.12.12'] - self.assertEqual(sorted(expected), sorted(ips)) - - mock_excluded_ip_addresses.return_value = ['7.7.7.7', '9.9.9.9', - '2.2.2.2'] - ips = deployment.get_overcloud_hosts(stack, 'external', working_dir) - expected = ['4.4.4.4', '6.6.6.6', '11.11.11.11'] - self.assertEqual(sorted(expected), sorted(ips)) - - @mock.patch('tripleoclient.utils.run_ansible_playbook', - autospec=True) - def test_config_download_already_in_progress_for_diff_stack( - self, mock_playbook): - log = mock.Mock() - stack = mock.Mock() - stack.stack_name = 'stacktest' - stack.output_show.return_value = {'output': {'output_value': []}} - clients = mock.Mock() - deployment.config_download( - log, clients, 'stacktest', 'templates', 'ssh_user', - 'ssh_key', 'ssh_networks', 'output_dir', False, - 'timeout') - - self.assertEqual(2, mock_playbook.call_count) - - def test_config_download_dirs(self): - stack = 'teststack' - old_cd_dir = os.path.join( - deployment.DEFAULT_WORK_DIR, stack) - - with tempfile.TemporaryDirectory() as new: - deployment.make_config_download_dir(new, 'teststack') - # Verify the old config-download dir is a symlink - self.assertTrue(os.path.islink(old_cd_dir)) - # Verify old config-download dir symlink points to new dir - self.assertEqual(os.path.join(os.path.realpath(new), stack), - os.path.realpath(old_cd_dir)) - - def test_config_download_migrate_dirs(self): - stack = 'teststack' - old_cd_dir = os.path.join( - deployment.DEFAULT_WORK_DIR, stack) - - with tempfile.TemporaryDirectory() as new: - os.makedirs(old_cd_dir) - with open(os.path.join(old_cd_dir, 'testfile'), 'w') as old_file: - old_file.write('foo') - - deployment.make_config_download_dir(new, stack) - # Verify the old cd dir was copied to the new dir - self.assertTrue(os.path.exists( - os.path.join(new, stack, old_file.name))) - # Verify the old config-download dir is a symlink - self.assertTrue(os.path.islink(old_cd_dir)) - # Verify old config-download dir symlink points to new dir - self.assertEqual(os.path.join(os.path.realpath(new), stack), - os.path.realpath(old_cd_dir)) - - def test_config_download_no_migrate_dirs(self): - stack = 'teststack' - old_cd_dir = os.path.join( - deployment.DEFAULT_WORK_DIR, stack) - - with tempfile.TemporaryDirectory() as new: - new_cd_dir = os.path.join(new, stack) - os.makedirs(new_cd_dir) - os.makedirs(old_cd_dir) - with open(os.path.join(old_cd_dir, 'testfile'), 'w') as old_file: - old_file.write('foo') - - deployment.make_config_download_dir(new, stack) - # Verify the old cd dir was not copied to the new dir as it already - # exists - self.assertFalse(os.path.exists( - os.path.join(new, stack, old_file.name))) - # Verify the old config-download dir is a symlink - self.assertTrue(os.path.islink(old_cd_dir)) - # Verify old config-download dir symlink points to new dir - self.assertEqual(os.path.join(os.path.realpath(new), stack), - os.path.realpath(old_cd_dir)) diff --git a/tripleoclient/tests/workflows/test_parameters.py b/tripleoclient/tests/workflows/test_parameters.py deleted file mode 100644 index 544c3fa03..000000000 --- a/tripleoclient/tests/workflows/test_parameters.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, 
Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from osc_lib.tests import utils - -from tripleoclient.workflows import parameters - - -class TestStringCapture(object): - def __init__(self): - self.capture_string = '' - - def write(self, msg): - self.capture_string = self.capture_string + msg - - def getvalue(self): - return self.capture_string - - -class TestParameterWorkflows(utils.TestCommand): - - def setUp(self): - super(TestParameterWorkflows, self).setUp() - self.app.client_manager.baremetal = mock.Mock() - - @mock.patch( - 'tripleo_common.utils.stack_parameters.generate_fencing_parameters', - return_value={}) - def test_generate_fencing_parameters(self, mock_params): - mock_params.return_value = {"parameter_defaults": {}} - - workflow_input = { - 'nodes_json': [], - 'delay': 0, - 'ipmi_level': 'test', - 'ipmi_cipher': 'test', - 'ipmi_lanplus': True - } - params = parameters.generate_fencing_parameters( - **workflow_input - ) - self.assertEqual(params, {"parameter_defaults": {}}) diff --git a/tripleoclient/utils.py b/tripleoclient/utils.py deleted file mode 100644 index a8678889a..000000000 --- a/tripleoclient/utils.py +++ /dev/null @@ -1,3582 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import collections -from collections import abc as collections_abc - -import configparser -import copy -import csv -import datetime -import errno -import getpass -import glob -import hashlib -import json -import logging - -import multiprocessing -import netaddr -import openstack -import os -import os.path -import prettytable -import pwd -import re -import shutil -import socket -import subprocess -import sys -import tarfile -import tempfile -import textwrap -import time -import yaml - -from heatclient.common import event_utils -from heatclient.common import template_utils -from heatclient.common import utils as heat_utils -from heatclient.exc import HTTPNotFound -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from oslo_concurrency import processutils - -from heatclient import exc as hc_exc -from urllib import error as url_error -from urllib import parse as url_parse -from urllib import request - -from tenacity import retry -from tenacity.stop import stop_after_attempt, stop_after_delay -from tenacity.wait import wait_fixed - -from tripleo_common.image import image_uploader -from tripleo_common.image import kolla_builder -from tripleo_common.utils import plan as plan_utils -from tripleo_common.utils import heat as tc_heat_utils -from tripleo_common.utils import stack as stack_utils -from tripleo_common import update -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import heat_launcher - -import warnings -warnings.simplefilter("ignore", UserWarning) - -import ansible_runner # noqa - -LOG = logging.getLogger(__name__ + ".utils") -_local_orchestration_client = None -_heat_pid = None - - -class Pushd(object): - """Simple context manager to change directories and then return.""" - - def __init__(self, directory): - """This context manager will enter and exit directories. - - >>> with Pushd(directory='/tmp'): - ... with open('file', 'w') as f: - ... f.write('test') - - :param directory: path to change directory to - :type directory: `string` - """ - self.dir = directory - self.pwd = self.cwd = os.getcwd() - - def __enter__(self): - os.chdir(self.dir) - self.cwd = os.getcwd() - return self - - def __exit__(self, *args): - if self.pwd != self.cwd: - os.chdir(self.pwd) - - -class TempDirs(object): - """Simple context manager to manage temp directories.""" - - def __init__(self, dir_path=None, dir_prefix='tripleo', cleanup=True, - chdir=True): - """This context manager will create, push, and cleanup temp directories. - - >>> with TempDirs() as t: - ... with open('file', 'w') as f: - ... f.write('test') - ... print(t) - ... os.mkdir('testing') - ... with open(os.path.join(t, 'file')) as w: - ... print(w.read()) - ... with open('testing/file', 'w') as f: - ... f.write('things') - ... with open(os.path.join(t, 'testing/file')) as w: - ... print(w.read()) - - :param dir_path: path to create the temp directory - :type dir_path: `string` - :param dir_prefix: prefix to add to a temp directory - :type dir_prefix: `string` - :param cleanup: when enabled the temp directory will be - removed on exit. - :type cleanup: `boolean` - :param chdir: Change to/from the created temporary dir on enter/exit. - :type chdir: `boolean` - """ - - # NOTE(cloudnull): kwargs for tempfile.mkdtemp are created - # because args are not processed correctly - # in py2. When we drop py2 support (cent7) - # these args can be removed and used directly - # in the `tempfile.mkdtemp` function. 
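The Pushd context manager removed above is a small reusable pattern: remember the current directory, chdir in, and chdir back on exit. As a rough standalone sketch (editor's illustration, standard library only, not the deleted implementation), the same enter/exit discipline can be written with contextlib:

```python
import contextlib
import os


@contextlib.contextmanager
def pushd(directory):
    """Chdir into `directory`, then return to the original cwd on exit."""
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield os.getcwd()
    finally:
        os.chdir(previous)


# Usage: anything done inside the block happens in /tmp.
with pushd('/tmp') as cwd:
    print('now in', cwd)
print('back in', os.getcwd())
```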
- tempdir_kwargs = dict() - if dir_path: - tempdir_kwargs['dir'] = dir_path - - if dir_prefix: - tempdir_kwargs['prefix'] = dir_prefix - - self.dir = tempfile.mkdtemp(**tempdir_kwargs) - self.pushd = Pushd(directory=self.dir) - self.cleanup = cleanup - self.chdir = chdir - - def __enter__(self): - if self.chdir: - self.pushd.__enter__() - return self.dir - - def __exit__(self, *args): - if self.chdir: - self.pushd.__exit__() - if self.cleanup: - self.clean() - else: - LOG.warning("Not cleaning temporary directory [ %s ]" % self.dir) - - def clean(self): - shutil.rmtree(self.dir, ignore_errors=True) - LOG.info("Temporary directory [ %s ] cleaned up" % self.dir) - - -def _encode_envvars(env): - """Encode a dict of values as strings. - - :param env: A dict of key=value items. - :type env: `dict`. - """ - for key, value in env.items(): - env[key] = str(value) - return env - - -def makedirs(dir_path): - """Recursively make directories and log the interaction. - - :param dir_path: full path of the directories to make. - :type dir_path: `string` - :returns: `boolean` - """ - - try: - os.makedirs(dir_path) - except FileExistsError: - LOG.debug( - 'Directory "{}" was not created because it' - ' already exists.'.format( - dir_path - ) - ) - return False - else: - LOG.debug('Directory "{}" was created.'.format(dir_path)) - return True - - -def playbook_limit_parse(limit_nodes): - """Return a parsed string for limits. - - This sanitizes user input so that what is provided is guaranteed to be - functional. If limit_nodes is None, this function will - return None. - - - :returns: String - """ - - if not limit_nodes: - return limit_nodes - - return ':'.join([i.strip() for i in re.split(',| |:', limit_nodes) if i]) - - -def playbook_verbosity(self): - """Return an integer for playbook verbosity levels. - - :param self: Class object used to interpret the runtime state. - :type self: Object - - :returns: Integer - """ - - if self.app.options.debug: - return 3 - if self.app_args.verbose_level <= 1: - return 0 - return self.app_args.verbose_level - - -def run_ansible_playbook(playbook, inventory, workdir, playbook_dir=None, - connection='smart', output_callback='tripleo_dense', - ssh_user='root', key=None, module_path=None, - limit_hosts=None, tags=None, skip_tags=None, - verbosity=0, quiet=False, extra_vars=None, - extra_vars_file=None, plan='overcloud', - gathering_policy='smart', extra_env_variables=None, - parallel_run=False, - callback_whitelist=constants.ANSIBLE_CWL, - ansible_cfg=None, ansible_timeout=30, - reproduce_command=True, - timeout=None, forks=None, - ignore_unreachable=False, rotate_log=False): - """Simple wrapper for ansible-playbook. - - :param playbook: Playbook filename. - :type playbook: String - - :param inventory: Either a proper inventory file, or a comma-separated list. - :type inventory: String - - :param workdir: Location of the working directory. - :type workdir: String - - :param playbook_dir: Location of the playbook directory. - (defaults to workdir). - :type playbook_dir: String - - :param connection: Connection type (local, smart, etc). - :type connection: String - - :param output_callback: Callback for output format. Defaults to - "tripleo_dense". - :type output_callback: String - - :param callback_whitelist: Comma separated list of callback plugins. - Defaults to - "tripleo_dense,tripleo_profile_tasks, - tripleo_states". - Custom output_callback is also whitelisted. - :type callback_whitelist: String - - :param ssh_user: User for the ssh connection. 
- :type ssh_user: String - - :param key: Private key to use for the ssh connection. - :type key: String - - :param module_path: Location of the ansible module and library. - :type module_path: String - - :param limit_hosts: Limit the execution to the hosts. - :type limit_hosts: String - - :param tags: Run specific tags. - :type tags: String - - :param skip_tags: Skip specific tags. - :type skip_tags: String - - :param verbosity: Verbosity level for Ansible execution. - :type verbosity: Integer - - :param quiet: Disable all output (Defaults to False) - :type quiet: Boolean - - :param extra_vars: Set additional variables as a Dict or the absolute - path of a JSON or YAML file type. - :type extra_vars: Either a Dict or the absolute path of JSON or YAML - - :param extra_vars_file: Set additional ansible variables using an - extravars file. - :type extra_vars_file: Dictionary - - :param plan: Plan name (Defaults to "overcloud"). - :type plan: String - - :param gathering_policy: This setting controls the default policy of - fact gathering ('smart', 'implicit', 'explicit'). - :type gathering_policy: String - - :param extra_env_variables: Dict option to extend or override any of the - default environment variables. - :type extra_env_variables: Dict - - :param parallel_run: Isolate playbook execution when playbooks are to be - executed with multi-processing. - :type parallel_run: Boolean - - :param ansible_cfg: Path to an ansible configuration file. One will be - generated in the artifact path if this option is None. - :type ansible_cfg: String - - :param ansible_timeout: Timeout for ansible connections. - :type ansible_timeout: int - - :param reproduce_command: Enable or disable option to reproduce ansible - commands upon failure. This option will produce - a bash script that can reproduce a failing - playbook command, which is helpful for debugging - and retry purposes. - :type reproduce_command: Boolean - - :param timeout: Timeout for ansible to finish playbook execution (minutes). 
- :type timeout: int - - :param rotate_log: Enable or disable option to rotate ansible.log - :type rotate_log: Boolean - """ - - def _playbook_check(play): - if not os.path.exists(play): - play = os.path.join(playbook_dir, play) - if not os.path.exists(play): - raise RuntimeError('No such playbook: {}'.format(play)) - LOG.debug('Ansible playbook {} found'.format(play)) - return play - - def _inventory(inventory): - if inventory: - if isinstance(inventory, str): - # check is file path - if os.path.exists(inventory): - return inventory - elif isinstance(inventory, dict): - inventory = yaml.safe_dump( - inventory, - default_flow_style=False - ) - inv_file = ansible_runner.utils.dump_artifact( - inventory, - workdir, - constants.ANSIBLE_HOSTS_FILENAME) - os.chmod(inv_file, 0o600) - return inv_file - - def _running_ansible_msg(playbook, timeout=None): - if timeout and timeout > 0: - return ('Running Ansible playbook with timeout %sm: %s,' % - (timeout, playbook)) - return ('Running Ansible playbook: %s,' % playbook) - - if not playbook_dir: - playbook_dir = workdir - - # Ensure that the ansible-runner env exists - runner_env = os.path.join(workdir, 'env') - makedirs(runner_env) - - if extra_vars_file: - runner_extra_vars = os.path.join(runner_env, 'extravars') - with open(runner_extra_vars, 'w') as f: - f.write(yaml.safe_dump(extra_vars_file, default_flow_style=False)) - - if timeout and timeout > 0: - settings_file = os.path.join(runner_env, 'settings') - timeout_value = timeout * 60 - if os.path.exists(settings_file): - with open(settings_file, 'r') as f: - settings_object = yaml.safe_load(f.read()) - settings_object['job_timeout'] = timeout_value - else: - settings_object = {'job_timeout': timeout_value} - - with open(settings_file, 'w') as f: - f.write(yaml.safe_dump(settings_object, default_flow_style=False)) - - if isinstance(playbook, (list, set)): - verified_playbooks = [_playbook_check(play=i) for i in playbook] - playbook = os.path.join(workdir, 'tripleo-multi-playbook.yaml') - with open(playbook, 'w') as f: - f.write( - yaml.safe_dump( - [{'import_playbook': i} for i in verified_playbooks], - default_flow_style=False - ) - ) - - LOG.info( - _running_ansible_msg(playbook, timeout) + - ' multi-playbook execution: {}' - ' Working directory: {},' - ' Playbook directory: {}'.format( - verified_playbooks, - workdir, - playbook_dir - ) - ) - else: - playbook = _playbook_check(play=playbook) - LOG.info( - _running_ansible_msg(playbook, timeout) + - ' Working directory: {},' - ' Playbook directory: {}'.format( - workdir, - playbook_dir - ) - ) - - if limit_hosts: - LOG.info( - 'Running ansible with the following limit: {}'.format( - limit_hosts - ) - ) - ansible_fact_path = os.path.join( - os.path.expanduser('~'), - '.tripleo', - 'fact_cache' - ) - makedirs(ansible_fact_path) - - if output_callback not in callback_whitelist.split(','): - callback_whitelist = ','.join([callback_whitelist, output_callback]) - - if not forks: - forks = min(multiprocessing.cpu_count() * 4, 100) - - env = dict() - env['ANSIBLE_SSH_ARGS'] = ( - '-o UserKnownHostsFile={} ' - '-o StrictHostKeyChecking=no ' - '-o ControlMaster=auto ' - '-o ControlPersist=30m ' - '-o ServerAliveInterval=64 ' - '-o ServerAliveCountMax=1024 ' - '-o Compression=no ' - '-o TCPKeepAlive=yes ' - '-o VerifyHostKeyDNS=no ' - '-o ForwardX11=no ' - '-o ForwardAgent=yes ' - '-o PreferredAuthentications=publickey ' - '-T' - ).format(os.devnull) - env['ANSIBLE_DISPLAY_FAILED_STDERR'] = True - env['ANSIBLE_FORKS'] = forks - env['ANSIBLE_TIMEOUT'] = 
ansible_timeout - env['ANSIBLE_GATHER_TIMEOUT'] = 45 - env['ANSIBLE_SSH_RETRIES'] = 3 - env['ANSIBLE_PIPELINING'] = True - env['ANSIBLE_SCP_IF_SSH'] = True - env['ANSIBLE_REMOTE_USER'] = ssh_user - env['ANSIBLE_STDOUT_CALLBACK'] = output_callback - env['ANSIBLE_COLLECTIONS_PATHS'] = '/usr/share/ansible/collections' - env['ANSIBLE_LIBRARY'] = ( - '/usr/share/ansible/tripleo-plugins/modules:' - '/usr/share/ansible/plugins/modules:' - '/usr/share/ceph-ansible/library:' - '/usr/share/ansible-modules:' - '{}/library'.format(constants.DEFAULT_VALIDATIONS_BASEDIR) - ) - env['ANSIBLE_LOOKUP_PLUGINS'] = ( - '/usr/share/ansible/tripleo-plugins/lookup:' - '/usr/share/ansible/plugins/lookup:' - '/usr/share/ceph-ansible/plugins/lookup:' - '{}/lookup_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR - ) - ) - env['ANSIBLE_CALLBACK_PLUGINS'] = ( - '/usr/share/ansible/tripleo-plugins/callback:' - '/usr/share/ansible/plugins/callback:' - '/usr/share/ceph-ansible/plugins/callback:' - '{}/callback_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR - ) - ) - env['ANSIBLE_ACTION_PLUGINS'] = ( - '/usr/share/ansible/tripleo-plugins/action:' - '/usr/share/ansible/plugins/action:' - '/usr/share/ceph-ansible/plugins/actions:' - '{}/action_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR - ) - ) - env['ANSIBLE_FILTER_PLUGINS'] = ( - '/usr/share/ansible/tripleo-plugins/filter:' - '/usr/share/ansible/plugins/filter:' - '/usr/share/ceph-ansible/plugins/filter:' - '{}/filter_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR - ) - ) - env['ANSIBLE_ROLES_PATH'] = ( - '/usr/share/ansible/tripleo-roles:' - '/usr/share/ansible/roles:' - '/usr/share/ceph-ansible/roles:' - '/etc/ansible/roles:' - '{}/roles'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR - ) - ) - env['ANSIBLE_CALLBACKS_ENABLED'] = callback_whitelist - env['ANSIBLE_RETRY_FILES_ENABLED'] = False - env['ANSIBLE_HOST_KEY_CHECKING'] = False - env['ANSIBLE_TRANSPORT'] = connection - env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200 - - # Set var handling for better performance - env['ANSIBLE_INJECT_FACT_VARS'] = False - env['ANSIBLE_VARS_PLUGIN_STAGE'] = 'all' - env['ANSIBLE_GATHER_SUBSET'] = '!all,min' - - if connection == 'local': - env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable - - if gathering_policy in ('smart', 'explicit', 'implicit'): - env['ANSIBLE_GATHERING'] = gathering_policy - - if module_path: - env['ANSIBLE_LIBRARY'] = ':'.join( - [env['ANSIBLE_LIBRARY'], module_path] - ) - - env['TRIPLEO_PLAN_NAME'] = plan - - get_uid = int(os.getenv('SUDO_UID', os.getuid())) - try: - user_pwd = pwd.getpwuid(get_uid) - except (KeyError, TypeError): - home = constants.CLOUD_HOME_DIR - else: - home = user_pwd.pw_dir - - env['ANSIBLE_LOG_PATH'] = os.path.join(home, 'ansible.log') - - if key: - env['ANSIBLE_PRIVATE_KEY_FILE'] = key - - # NOTE(cloudnull): Re-apply the original environment ensuring that - # anything defined on the CLI is set accordingly. 
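The env.update(os.environ.copy()) call that follows the note above gives the caller's real environment the last word over the computed defaults. A tiny sketch of that precedence (editor's illustration; the variable names are just examples):

```python
import os

# Computed defaults first...
env = {'ANSIBLE_FORKS': '10', 'ANSIBLE_TIMEOUT': '30'}
# ...then the real environment is layered on top, so anything the
# operator exported (e.g. ANSIBLE_FORKS=2) overrides the default.
env.update(os.environ.copy())
print(env.get('ANSIBLE_FORKS'))  # '2' if exported, else '10'
```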
- env.update(os.environ.copy()) - - if extra_env_variables: - if not isinstance(extra_env_variables, dict): - msg = "extra_env_variables must be a dict" - LOG.error(msg) - raise SystemError(msg) - else: - env.update(extra_env_variables) - - if 'ANSIBLE_CONFIG' not in env and not ansible_cfg: - ansible_cfg = os.path.join(workdir, 'ansible.cfg') - config = configparser.ConfigParser() - if os.path.isfile(ansible_cfg): - config.read(ansible_cfg) - - if 'defaults' not in config.sections(): - config.add_section('defaults') - - config.set('defaults', 'internal_poll_interval', '0.01') - with open(ansible_cfg, 'w') as f: - config.write(f) - env['ANSIBLE_CONFIG'] = ansible_cfg - elif 'ANSIBLE_CONFIG' not in env and ansible_cfg: - env['ANSIBLE_CONFIG'] = ansible_cfg - - command_path = None - with TempDirs(chdir=False) as ansible_artifact_path: - - r_opts = { - 'private_data_dir': workdir, - 'project_dir': playbook_dir, - 'inventory': _inventory(inventory), - 'envvars': _encode_envvars(env=env), - 'playbook': playbook, - 'verbosity': verbosity, - 'quiet': quiet, - 'extravars': extra_vars, - 'fact_cache': ansible_fact_path, - 'fact_cache_type': 'jsonfile', - 'artifact_dir': ansible_artifact_path, - 'rotate_artifacts': 256 - } - - if skip_tags: - r_opts['skip_tags'] = skip_tags - - if tags: - r_opts['tags'] = tags - - if limit_hosts: - r_opts['limit'] = limit_hosts - - if parallel_run: - r_opts['directory_isolation_base_path'] = ansible_artifact_path - - if rotate_log and os.path.exists(env['ANSIBLE_LOG_PATH']): - rotate_ansible_log(env['ANSIBLE_LOG_PATH']) - - runner_config = ansible_runner.runner_config.RunnerConfig(**r_opts) - runner_config.prepare() - runner = ansible_runner.Runner(config=runner_config) - - if reproduce_command: - command_path = os.path.join( - workdir, - "ansible-playbook-command.sh" - ) - with open(command_path, 'w') as f: - f.write('#!/usr/bin/env bash\n') - f.write('echo -e "Exporting environment variables"\n') - for key, value in r_opts['envvars'].items(): - f.write('export {}="{}"\n'.format(key, value)) - f.write('echo -e "Running Ansible command"\n') - args = '{} "$@"\n'.format(' '.join(runner_config.command)) - # Single quote the dict passed to -e - args = re.sub('({.*})', '\'\\1\'', args) - f.write(args) - os.chmod(command_path, 0o750) - - try: - status, rc = runner.run() - finally: - # NOTE(cloudnull): After a playbook executes, ensure the log - # file, if it exists, was created with - # appropriate ownership. - _log_path = r_opts['envvars']['ANSIBLE_LOG_PATH'] - if os.path.isfile(_log_path): - os.chown(_log_path, get_uid, -1) - # Save files we care about - with open(os.path.join(workdir, 'stdout'), 'w') as f: - f.write(runner.stdout.read()) - for output in 'status', 'rc': - val = getattr(runner, output) - if val: - with open(os.path.join(workdir, output), 'w') as f: - f.write(str(val)) - - if rc != 0: - if rc == 4 and ignore_unreachable: - LOG.info('Ignoring unreachable nodes') - else: - err_msg = ( - 'Ansible execution failed. playbook: {},' - ' Run Status: {},' - ' Return Code: {}'.format( - playbook, - status, - rc - ) - ) - if command_path: - err_msg += ( - ', To rerun the failed command manually execute the' - ' following script: {}'.format( - command_path - ) - ) - - if not quiet: - LOG.error(err_msg) - - raise RuntimeError(err_msg) - - LOG.info( - 'Ansible execution success. 
playbook: {}'.format( - playbook)) - - -def convert(data): - """Recursively converts dictionary keys and values to strings.""" - if isinstance(data, str): - return str(data) - if isinstance(data, collections_abc.Mapping): - return dict(map(convert, data.items())) - if isinstance(data, collections_abc.Iterable): - return type(data)(map(convert, data)) - return data - - -def bracket_ipv6(address): - """Put a bracket around address if it is valid IPv6 - - Return it unchanged if it is a hostname or IPv4 address. - """ - try: - socket.inet_pton(socket.AF_INET6, address) - return "[%s]" % address - except socket.error: - return address - - -def is_valid_ip(ip): - """Return True if the IP is either v4 or v6 - - Return False if invalid. - """ - return netaddr.valid_ipv4(ip) or netaddr.valid_ipv6(ip) - - -def is_loopback(host): - """Return True if the IP or the host is a loopback - - Return False if not. - """ - loopbacks = ['127', '::1'] - for item in loopbacks: - if host.startswith(item): - return True - return False - - -def get_host_ips(host, socket_type=None): - """Look up a host to return a list of IPs. - - :param host: Host to look up - :type host: string - - :param socket_type: Type of a socket (e.g. socket.AF_INET, socket.AF_INET6) - :type socket_type: string - """ - - ips = set() - if socket_type: - socket_types = (socket_type,) - else: - socket_types = (socket.AF_INET, socket.AF_INET6) - for t in socket_types: - try: - res = socket.getaddrinfo(host, None, t, socket.SOCK_STREAM) - except socket.error: - continue - nips = set([x[4][0] for x in res]) - ips.update(nips) - return list(ips) - - -def get_single_ip(host, allow_loopback=False, ip_version=4): - """Translate a hostname into a single IP address if it is a valid IP. - - :param host: IP or hostname or FQDN to look up - :type host: string - - :param allow_loopback: Whether or not a loopback IP can be returned. - Defaults to False. - :type allow_loopback: boolean - - Return the host unchanged if it is already an IPv4 or IPv6 address. - """ - - ip = host - if not is_valid_ip(host): - socket_type = socket.AF_INET6 if ip_version == 6 else socket.AF_INET - ips = get_host_ips(host, socket_type=socket_type) - if not ips: - raise exceptions.LookupError('No IP was found for the host: ' - '%s' % host) - else: - ip = ips[0] - if len(ips) > 1: - raise exceptions.LookupError('More than one IP was found for the ' - 'host %s: %s' % (host, ips)) - if not allow_loopback and is_loopback(ip): - raise exceptions.LookupError('IP address for host %s is a loopback' - ' IP: %s' % (host, ip)) - if not is_valid_ip(ip): - raise exceptions.LookupError('IP address for host %s is not a ' - 'valid IP: %s' % (host, ip)) - return ip - - -def write_env_file(env_data, env_file, registry_overwrites): - """Write the tht env file as yaml""" - - data = {'parameter_defaults': env_data} - if registry_overwrites: - data['resource_registry'] = registry_overwrites - with open(env_file, "w") as f: - dumper = yaml.dumper.SafeDumper - dumper.ignore_aliases = lambda self, data: True - yaml.dump(data, f, default_flow_style=False, Dumper=dumper) - - -def store_cli_param(command_name, parsed_args): - """Write the CLI parameters into a history file""" - - # The command name is the part after "openstack" with spaces. Switching - # to "-" makes it easier to read. "openstack undercloud install" will be - # stored as "undercloud-install" for example. 
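A quick illustration of the rename described in the comment above, before the code resumes (editor's sketch; the literal command string is an example):

```python
# "openstack undercloud install" is recorded as "undercloud-install":
command_name = "undercloud install"
print(command_name.replace(" ", "-"))  # undercloud-install
```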
- command_name = command_name.replace(" ", "-") - - history_path = os.path.join(constants.CLOUD_HOME_DIR, '.tripleo') - try: - os.mkdir(history_path, 0o700) - os.chown(history_path, - int(os.environ.get('SUDO_UID', -1)), - int(os.environ.get('SUDO_GID', -1))) - except OSError as e: - if e.errno != errno.EEXIST: - messages = _("Unable to create the .tripleo directory: " - "{0}, {1}").format(history_path, e) - raise IOError(messages) - - if os.path.isdir(history_path): - try: - history_file_path = os.path.join(history_path, 'history') - with open(history_file_path, 'a') as history: - args = parsed_args.__dict__.copy() - used_args = ', '.join('%s=%s' % (key, value) - for key, value in args.items()) - history.write(' '.join([str(datetime.datetime.now()), - str(command_name), used_args, "\n"])) - os.chown(history_file_path, - int(os.environ.get('SUDO_UID', -1)), - int(os.environ.get('SUDO_GID', -1))) - except IOError as e: - messages = _("Unable to write into TripleO history file: " - "{0}, {1}").format(history_path, e) - raise IOError(messages) - else: - raise exceptions.InvalidConfiguration(_("Target path %s is not a " - "directory") % history_path) - - -def create_tempest_deployer_input(config_name='tempest-deployer-input.conf', - output_dir=None): - config = configparser.ConfigParser() - - # Create required sections - for section in ('auth', 'compute', 'compute-feature-enabled', 'identity', - 'image', 'network', 'object-storage', - 'volume', 'volume-feature-enabled'): - config.add_section(section) - - # Dynamic credentials means tempest will create the required credentials if - # a test requires a new account to work, tempest will create one just for - # that test - config.set('auth', 'use_dynamic_credentials', 'true') - - # Does the test environment support obtaining instance serial console - # output? (default: true) - # set in [nova.serial_console]->enabled - config.set('compute-feature-enabled', 'console_output', 'false') - - # Name of the backend1 (must be declared in cinder.conf) - # (default: 'BACKEND_1') - # set in [cinder]->enabled_backends - config.set('volume', 'backend1_name', 'tripleo_iscsi') - - # Update bootable status of a volume Not implemented on icehouse - # (default: false) - # python-cinderclient supports set-bootable - config.set('volume-feature-enabled', 'bootable', 'true') - - # Fix region value because TripleO is using non-standard value - for section in ('compute', 'identity', 'image', 'network', - 'object-storage', 'volume'): - config.set(section, 'region', 'regionOne') - - if output_dir: - config_path = os.path.join(output_dir, config_name) - else: - config_path = config_name - with open(config_path, 'w+') as config_file: - config.write(config_file) - - -def wait_for_stack_ready(orchestration_client, stack_name, marker=None, - action='CREATE', nested_depth=2, - max_retries=10): - """Check the status of an orchestration stack - - Get the status of an orchestration stack and check whether it is complete - or failed. 
- - :param orchestration_client: Instance of Orchestration client - :type orchestration_client: heatclient.v1.client.Client - - :param stack_name: Name or UUID of stack to retrieve - :type stack_name: string - - :param marker: UUID of the last stack event before the current action - :type marker: string - - :param action: Current action to check the stack for COMPLETE - :type action: string - - :param nested_depth: Max depth to look for events - :type nested_depth: int - - :param max_retries: Number of retries in the case of server problems - :type max_retries: int - """ - log = logging.getLogger(__name__ + ".wait_for_stack_ready") - stack = get_stack(orchestration_client, stack_name) - if not stack: - return False - stack_name = "%s/%s" % (stack.stack_name, stack.id) - - retries = 0 - - while retries <= max_retries: - try: - stack_status, msg = event_utils.poll_for_events( - orchestration_client, stack_name, action=action, - poll_period=5, marker=marker, out=sys.stdout, - nested_depth=nested_depth) - print(msg) - return stack_status == '%s_COMPLETE' % action - except hc_exc.HTTPException as e: - if e.code in [500, 503, 504]: - retries += 1 - log.warning("Server issue while waiting for stack to be ready." - " Attempting retry {} of {}".format(retries, - max_retries)) - time.sleep(retries * 5) - continue - log.error("Error occurred while waiting for stack to be ready.") - raise e - - raise RuntimeError( - "wait_for_stack_ready: Max retries {} reached".format(max_retries)) - - -def get_stack_output_item(stack, item): - if not stack: - return None - - for output in stack.to_dict().get('outputs', {}): - if output['output_key'] == item: - return output['output_value'] - # item not found in outputs - return None - - -def get_stack_saved_output_item(output, working_dir): - outputs_dir = os.path.join(working_dir, 'outputs') - output_path = os.path.join(outputs_dir, output) - if not os.path.isfile(output_path): - return None - with open(output_path) as f: - return yaml.safe_load(f.read()) - - -def get_overcloud_endpoint(working_dir): - return get_stack_saved_output_item('KeystoneURL', working_dir) - - -def get_service_ips(stack): - service_ips = {} - for output in stack.to_dict().get('outputs', {}): - service_ips[output['output_key']] = output['output_value'] - return service_ips - - -def get_endpoint_map(working_dir): - endpoint_map = get_stack_saved_output_item('EndpointMap', working_dir) - if not endpoint_map: - endpoint_map = {} - return endpoint_map - - -def get_excluded_ip_addresses(working_dir): - return get_stack_saved_output_item( - 'BlacklistedIpAddresses', working_dir) - - -def get_role_net_ip_map(working_dir): - return get_stack_saved_output_item( - 'RoleNetIpMap', working_dir) - - -def get_stack(orchestration_client, stack_name): - """Get the currently deployed overcloud stack, if it exists. 
- - The caller is responsible for checking if the return value is None - """ - - try: - stack = orchestration_client.stacks.get(stack_name) - return stack - except HTTPNotFound: - pass - - -def get_rc_params(working_dir): - rc_params = {} - rc_params['password'] = get_stack_saved_output_item( - 'AdminPassword', working_dir) - rc_params['region'] = get_stack_saved_output_item( - 'KeystoneRegion', working_dir) - return rc_params - - -def check_ceph_ansible(resource_registry, stage): - """Fail if ceph-ansible is still passed - - If any of the ceph-ansible related resources are part of the - Ceph services path, then the overcloud deploy (or the stack - update) should fail, unless they are included in the context - of Update/Upgrade/Converge, where these environments are still - relevant and required. - """ - - if not resource_registry or stage not in "DeployOvercloud": - return - - # for each Ceph related service, fail if ceph-ansible is part - # of the provided path - for name, path in resource_registry.items(): - if 'Ceph' in name and 'ceph-ansible' in path: - raise exceptions.InvalidConfiguration('The Ceph deployment is not ' - 'available anymore using ' - 'ceph-ansible. If you want ' - 'to deploy Ceph, please add ' - 'the cephadm environment ' - 'file.') - - -def check_deployed_ceph_stage(environment): - """Raises an exception if Ceph is being deployed without DeployedCeph:True. - - If Ceph is not being deployed or DeployedCeph is true, then return - nothing, so the program that calls this function can continue without - error. This function also looks for the external Ceph Heat resource to - make sure in this scenario an error is not raised regardless of the - DeployedCeph boolean value. - """ - - resource_registry = environment.get('resource_registry', {}) - - if not resource_registry: - return - - ceph_external = environment.get('resource_registry', {}).get( - 'OS::TripleO::Services::CephExternal', 'OS::Heat::None') - - if ceph_external != "OS::Heat::None": - return - - # it's not an external Ceph cluster, let's evaluate the DeployedCeph param - # and the Ceph resources provided - deployed_ceph = environment.get('parameter_defaults', - {}).get('DeployedCeph', False) - - # for each ceph resource, if the path contains cephadm and the DeployedCeph - # boolean is not True, raise an exception and guide the operator through - # the right path of deploying ceph - - for name, path in resource_registry.items(): - if 'Ceph' in name and 'cephadm' in path and not deployed_ceph: - raise exceptions.InvalidConfiguration('Ceph deployment is not ' - 'available anymore during ' - 'overcloud deploy. If you ' - 'want to deploy Ceph, ' - 'please see "openstack ' - 'overcloud ceph deploy ' - '--help" to deploy Ceph ' - 'before deploying the ' - 'overcloud and then include ' - 'the cephadm environment ' - 'file.') - - -def check_ceph_fsid_matches_env_files(old_env, environment): - """Check CephClusterFSID against proposed env files - - There have been cases where operators inadvertently changed the - CephClusterFSID on a stack update, which is unsupported by both - Ceph and OpenStack. - For this reason we need to check that the existing deployed Ceph - cluster ID present in the stack is consistent with the value of - the environment, raising an exception if they are different. 
env_ceph_fsid = environment.get('parameter_defaults', - {}).get('CephClusterFSID', False) - stack_ceph_fsid = old_env.get('parameter_defaults', - {}).get('CephClusterFSID', False) - - if bool(env_ceph_fsid) and env_ceph_fsid != stack_ceph_fsid: - raise exceptions.InvalidConfiguration('The CephClusterFSID environment ' - 'value ({}) does not match the ' - 'stack configuration value ({}). ' - 'Ensure the CephClusterFSID ' - 'parameter is properly configured ' - 'in the storage environment ' - 'files.' - .format(env_ceph_fsid, - stack_ceph_fsid)) - - -def check_swift_and_rgw(old_env, env, stage): - """Check that Swift and RGW aren't both enabled in the overcloud - - When Ceph is deployed by TripleO using the default cephadm environment - file, the RGW component is included by default and deployed on both - greenfield and brownfield deployments. - However, if an overcloud upgrade is run and Swift was already deployed, - the RGW resource shouldn't replace Swift and -e cephadm-rbd-only.yaml - should be passed. - For this reason we need to check if Swift was previously enabled, and - fail if the RGW resource is passed. - """ - rgw_env = env.get('resource_registry', - {}).get('OS::TripleO::Services::CephRgw', - 'OS::Heat::None') - - allowed_stage = re.compile("(Upgrade|Update)Prepare", re.I) - # if the RGW resource isn't passed or we're not in the upgrade context - # there's no need to run this check - if not re.match(allowed_stage, stage) or rgw_env == 'OS::Heat::None': - return - - sw = old_env.get('resource_registry', - {}).get('OS::TripleO::Services::SwiftProxy', - 'OS::Heat::None') - - # RGW is present in the env list and Swift was previously deployed - if sw != "OS::Heat::None": - raise exceptions.InvalidConfiguration('Both Swift and RGW resources ' - 'detected. ' - 'Ensure you have only one of ' - 'them enabled (or provide the ' - 'cephadm-rbd-only.yaml ' - 'environment file to exclude ' - 'RGW)') - - -def check_network_plugin(output_dir, env): - """Disallow upgrade if change in network plugin detected - - If the undercloud is upgraded with a change in network plugin, - i.e. ovs to ovn or ovn to ovs, it will break the undercloud: - just switching is not enough, the network resources need to be - migrated. We therefore detect a change in network plugin and - block the upgrade. - """ - - neutron_env = env.get('resource_registry', - {}).get('OS::TripleO::Services::NeutronApi', - 'OS::Heat::None') - - # Neutron is not deployed so just return - if neutron_env == "OS::Heat::None": - return - - parameters = env.get('parameter_defaults', {}) - - file_name = constants.TRIPLEO_STATIC_INVENTORY - - inventory_path = os.path.join(output_dir, file_name) - - if not os.path.isfile(inventory_path): - message = (_("The %s inventory file is missing. 
Without it " - "network plugin change can't be detected, and upgrade " - "will have issues if there is a change" % inventory_path)) - LOG.error(message) - raise exceptions.InvalidConfiguration(message) - - with open(inventory_path, 'r') as f: - inventory_data = yaml.safe_load(f) - - if ('neutron_ovs_agent' in inventory_data and - 'ovn' in parameters.get('NeutronMechanismDrivers')): - message = _("Network Plugin mismatch detected, " - "Upgrade from ml2 ovs to ml2 ovn is not allowed") - LOG.error(message) - raise exceptions.InvalidConfiguration(message) - elif ("ovn_controller" in inventory_data and - "openvswitch" in parameters.get('NeutronMechanismDrivers')): - message = _("Network Plugin mismatch detected, " - "Upgrade from ml2 ovn to ml2 ovs is not allowed") - LOG.error(message) - raise exceptions.InvalidConfiguration(message) - - -def check_service_vips_migrated_to_service(environment): - registry = environment.get('resource_registry', {}) - removed_resources = {'OS::TripleO::Network::Ports::RedisVipPort', - 'OS::TripleO::Network::Ports::OVNDBsVipPort'} - msg = ("Resources 'OS::TripleO::Network::Ports::RedisVipPort' and " - "'OS::TripleO::Network::Ports::OVNDBsVipPort' can no longer be " - "used. Service VIPs has been moved to the service definition " - "template. To configure a specific IP address use the parameters " - "'RedisVirtualFixedIPs' and/or 'OVNDBsVirtualFixedIPs'. To control" - "the network or subnet for VIP allocation set up the " - "'ServiceNetMap' and/or 'VipSubnetMap' parameters with the desired " - "network and/or subnet for the service.") - for resource in removed_resources: - if (resource in registry and - registry.get(resource) != 'OS::Heat::None'): - raise exceptions.InvalidConfiguration(msg) - - -def check_neutron_resources(environment): - registry = environment.get('resource_registry', {}) - msg = ("Resource {} maps to type {} and the Neutron " - "service is not available when using ephemeral Heat. " - "The generated environments from " - "'openstack overcloud baremetal provision' and " - "'openstack overcloud network provision' must be included " - "with the deployment command.") - for rsrc, rsrc_type in registry.items(): - if (type(rsrc_type) == str and - rsrc_type.startswith("OS::Neutron")): - raise exceptions.InvalidConfiguration(msg.format(rsrc, rsrc_type)) - - -def remove_known_hosts(overcloud_ip): - """For a given IP address remove SSH keys from the known_hosts file""" - - known_hosts = os.path.join(constants.CLOUD_HOME_DIR, '.ssh/known_hosts') - - if os.path.exists(known_hosts): - command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts] - subprocess.check_call(command) - - -def file_checksum(filepath, hash_algo='sha512'): - """Calculate sha512 checksum on file - :param filepath: Full path to file (e.g. /home/stack/image.qcow2) - :type filepath: string - :param hash_algo: name of the hash algorithm, 'sha512' by default - :type hash_algo: string - - :returns: hexadecimal hash of the file - - :raises: - RuntimeError if the 'hash_algo' value isn't supported. - ValueError if the path isn't pointing to a regular file. - """ - if not os.path.isfile(filepath): - raise ValueError(_("The given file {0} is not a regular " - "file").format(filepath)) - - if hash_algo not in constants.FIPS_COMPLIANT_HASHES: - raise RuntimeError( - "The requested hash algorithm (%s) is not supported." 
% hash_algo) - - checksum = hashlib.new(hash_algo) - - with open(filepath, 'rb') as f: - while True: - fragment = f.read(65536) - if not fragment: - break - checksum.update(fragment) - return checksum.hexdigest() - - -def ensure_run_as_normal_user(): - """Check if the command runs under normal user (EUID!=0)""" - if os.geteuid() == 0: - raise exceptions.RootUserExecution(_( - 'This command cannot run under root user.' - ' Switch to a normal user.')) - - -def get_deployment_user(): - """Return the user name which is used to deploy the cloud""" - return getpass.getuser() - - -def capabilities_to_dict(caps): - """Convert the Node's capabilities into a dictionary.""" - if not caps: - return {} - return dict([key.split(':', 1) for key in caps.split(',')]) - - -def dict_to_capabilities(caps_dict): - """Convert a dictionary into a string with the capabilities syntax.""" - return ','.join(["%s:%s" % (key, value) - for key, value in caps_dict.items() - if value is not None]) - - -def node_get_capabilities(node): - """Get node capabilities.""" - return capabilities_to_dict(node.properties.get('capabilities')) - - -def node_add_capabilities(bm_client, node, **updated): - """Add or replace capabilities for a node.""" - caps = node_get_capabilities(node) - caps.update(updated) - converted_caps = dict_to_capabilities(caps) - node.properties['capabilities'] = converted_caps - bm_client.node.update(node.uuid, [{'op': 'add', - 'path': '/properties/capabilities', - 'value': converted_caps}]) - return caps - - -def assign_and_verify_profiles(bm_client, flavors, - assign_profiles=False, dry_run=False): - """Assign and verify profiles for given flavors. - - :param bm_client: ironic client instance - :param flavors: map flavor name -> (flavor object, required count) - :param assign_profiles: whether to allow assigning profiles to nodes - :param dry_run: whether to skip applying actual changes (only makes sense - if assign_profiles is True) - :returns: tuple (errors count, warnings count) - """ - log = logging.getLogger(__name__ + ".assign_and_verify_profiles") - predeploy_errors = 0 - predeploy_warnings = 0 - - # nodes available for deployment and scaling (including active) - bm_nodes = {node.uuid: node - for node in bm_client.node.list(maintenance=False, - detail=True) - if node.provision_state in ('available', 'active')} - # create a pool of unprocessed nodes and record their capabilities - free_node_caps = {uu: node_get_capabilities(node) - for uu, node in bm_nodes.items()} - - # TODO(dtantsur): use command-line arguments to specify the order in - # which profiles are processed (might matter for assigning profiles) - profile_flavor_used = False - for flavor_name, (flavor, scale) in flavors.items(): - if not scale: - log.debug("Skipping verification of flavor %s because " - "none will be deployed", flavor_name) - continue - - profile = flavor.get_keys().get('capabilities:profile') - # If there's only a single flavor, then it's expected for it to have - # no profile assigned. 
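The two capability helpers deleted above are easy to exercise in isolation. A round-trip sketch (editor's illustration that restates the helpers; the capability string is an example):

```python
def capabilities_to_dict(caps):
    """Parse the Ironic "k:v,k:v" capability string into a dict."""
    if not caps:
        return {}
    return dict(key.split(':', 1) for key in caps.split(','))


def dict_to_capabilities(caps_dict):
    """Serialize a dict back into the "k:v,k:v" capability syntax."""
    return ','.join('%s:%s' % (key, value)
                    for key, value in caps_dict.items()
                    if value is not None)


caps = 'profile:control,boot_option:local'
as_dict = capabilities_to_dict(caps)
print(as_dict)                        # {'profile': 'control', 'boot_option': 'local'}
print(dict_to_capabilities(as_dict))  # profile:control,boot_option:local
```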
- if not profile and len(flavors) > 1: - predeploy_errors += 1 - log.error( - 'Error: The %s flavor has no profile associated', flavor_name) - log.error( - 'Recommendation: assign a profile with openstack flavor ' - 'set --property "capabilities:profile"="PROFILE_NAME" %s', - flavor_name) - continue - - profile_flavor_used = True - - # first collect nodes with known profiles - assigned_nodes = [uu for uu, caps in free_node_caps.items() - if caps.get('profile') == profile] - required_count = scale - len(assigned_nodes) - - if required_count < 0: - log.warning('%d nodes with profile %s won\'t be used ' - 'for deployment now', -required_count, profile) - predeploy_warnings += 1 - required_count = 0 - elif required_count > 0 and assign_profiles: - # find more nodes by checking XXX_profile capabilities that are - # set by ironic-inspector or manually - capability = '%s_profile' % profile - more_nodes = [ - uu for uu, caps in free_node_caps.items() - # use only nodes without a known profile - if not caps.get('profile') and - caps.get(capability, '').lower() in ('1', 'true') and - # do not assign profiles for active nodes - bm_nodes[uu].provision_state == 'available' - ][:required_count] - assigned_nodes.extend(more_nodes) - required_count -= len(more_nodes) - - for uu in assigned_nodes: - # make sure these nodes are not reused for other profiles - node_caps = free_node_caps.pop(uu) - # save profile for newly assigned nodes, but only if we - # succeeded in finding enough of them - if not required_count and not node_caps.get('profile'): - node = bm_nodes[uu] - if not dry_run: - node_add_capabilities(bm_client, node, profile=profile) - log.info('Node %s was assigned profile %s', uu, profile) - else: - log.debug('Node %s has profile %s', uu, profile) - - if required_count > 0: - log.error( - "Error: only %s of %s requested ironic nodes are tagged " - "to profile %s (for flavor %s)", - scale - required_count, scale, profile, flavor_name - ) - log.error( - "Recommendation: tag more nodes using $ openstack baremetal " - "node set --properties capabilities=profile:%s", - profile) - predeploy_errors += 1 - - nodes_without_profile = [uu for uu, caps in free_node_caps.items() - if not caps.get('profile')] - if nodes_without_profile and profile_flavor_used: - predeploy_warnings += 1 - log.warning( - "There are %d ironic nodes with no profile that will " - "not be used: %s", len(nodes_without_profile), - ', '.join(nodes_without_profile) - ) - - return predeploy_errors, predeploy_warnings - - -def add_deployment_plan_arguments(parser): - """Add deployment plan arguments (flavors and scales) to a parser""" - - # TODO(d0ugal): Deprecated in Newton. Remove these in U. 
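Before the deprecated plan arguments resume below, a brief editor's sketch of the selection step inside assign_and_verify_profiles above: nodes whose capabilities already carry the profile are taken first, then the remainder is filled from nodes flagged with a "<profile>_profile" hint. All node data here is made up for illustration.

```python
free_node_caps = {
    'node-1': {'profile': 'control'},
    'node-2': {},
    'node-3': {'compute_profile': 'true'},
}

profile, scale = 'compute', 2
# Nodes already tagged with the exact profile.
assigned = [uu for uu, caps in free_node_caps.items()
            if caps.get('profile') == profile]
required = scale - len(assigned)

# Fill the shortfall from "<profile>_profile" hints, as the loop above does.
hint = '%s_profile' % profile
more = [uu for uu, caps in free_node_caps.items()
        if not caps.get('profile')
        and caps.get(hint, '').lower() in ('1', 'true')][:required]
assigned.extend(more)
print(assigned)  # ['node-3']
```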
- parser.add_argument('--control-scale', type=int, - help=_('New number of control nodes.')) - parser.add_argument('--compute-scale', type=int, - help=_('New number of compute nodes.')) - parser.add_argument('--ceph-storage-scale', type=int, - help=_('New number of ceph storage nodes.')) - parser.add_argument('--block-storage-scale', type=int, - help=_('New number of cinder storage nodes.')) - parser.add_argument('--swift-storage-scale', type=int, - help=_('New number of swift storage nodes.')) - parser.add_argument('--control-flavor', - help=_('Nova flavor to use for control nodes.')) - parser.add_argument('--compute-flavor', - help=_('Nova flavor to use for compute nodes.')) - parser.add_argument('--ceph-storage-flavor', - help=_('Nova flavor to use for ceph storage nodes.')) - parser.add_argument('--block-storage-flavor', - help=_('Nova flavor to use for cinder storage nodes')) - parser.add_argument('--swift-storage-flavor', - help=_('Nova flavor to use for swift storage nodes')) - - -def get_roles_info(parsed_args): - """Get flavor name and scale for all deployment roles. - - :returns: dict role name -> (flavor name, scale) - """ - return { - 'control': (parsed_args.control_flavor, parsed_args.control_scale), - 'compute': (parsed_args.compute_flavor, parsed_args.compute_scale), - 'ceph-storage': (parsed_args.ceph_storage_flavor, - parsed_args.ceph_storage_scale), - 'block-storage': (parsed_args.block_storage_flavor, - parsed_args.block_storage_scale), - 'swift-storage': (parsed_args.swift_storage_flavor, - parsed_args.swift_storage_scale) - } - - -def _csv_to_nodes_dict(nodes_csv): - """Convert CSV to a list of dicts formatted for os_cloud_config - - Given a CSV file in the format below, convert it into the - structure expected by os_cloud_config JSON files. - - pm_type, pm_addr, pm_user, pm_password, mac - """ - - data = [] - - for row in csv.reader(nodes_csv): - node = { - "pm_user": row[2], - "pm_addr": row[1], - "pm_password": row[3], - "pm_type": row[0], - "mac": [ - row[4] - ] - } - - try: - node['pm_port'] = row[5] - except IndexError: - pass - - data.append(node) - - return data - - -def parse_env_file(env_file, file_type=None): - if file_type == 'json' or env_file.name.endswith('.json'): - nodes_config = json.load(env_file) - elif file_type == 'csv' or env_file.name.endswith('.csv'): - nodes_config = _csv_to_nodes_dict(env_file) - elif env_file.name.endswith('.yaml'): - nodes_config = yaml.safe_load(env_file) - else: - raise exceptions.InvalidConfiguration( - _("Invalid file extension for %s, must be json, yaml or csv") % - env_file.name) - - if 'nodes' in nodes_config: - nodes_config = nodes_config['nodes'] - - return nodes_config - - -def prompt_user_for_confirmation(message, logger, positive_response='y'): - """Prompt user for a y/N confirmation - - Use this function to prompt the user for a y/N confirmation - with the provided message. The [y/N] should be included in - the provided message to this function to indicate the expected - input for confirmation. You can customize the positive response if - y/N is not a desired input. 
- - :param message: Confirmation string prompt - :param logger: logger object used to write info logs - :param positive_response: Beginning character for a positive user input - :return: boolean true for valid confirmation, false for all others - """ - try: - if not sys.stdin.isatty(): - logger.error(_('User interaction required, cannot confirm.')) - return False - sys.stdout.write(message) - sys.stdout.flush() - prompt_response = sys.stdin.readline().lower() - if not prompt_response.startswith(positive_response): - logger.info(_( - 'User did not confirm action so taking no action.')) - return False - logger.info(_('User confirmed action.')) - return True - except KeyboardInterrupt: # ctrl-c - logger.info(_( - 'User did not confirm action (ctrl-c) so taking no action.')) - except EOFError: # ctrl-d - logger.info(_( - 'User did not confirm action (ctrl-d) so taking no action.')) - return False - - -def replace_links_in_template_contents(contents, link_replacement): - """Replace get_file and type file links in Heat template contents - - If the string contents passed in is a Heat template, scan the - template for 'get_file' and 'type' occurrences, and replace the - file paths according to link_replacement dict. (Key/value in - link_replacement are from/to, respectively.) - - If the string contents don't look like a Heat template, return the - contents unmodified. - """ - - template = {} - try: - template = yaml.safe_load(contents) - except yaml.YAMLError: - return contents - - if not (isinstance(template, dict) and - template.get('heat_template_version')): - return contents - - template = replace_links_in_template(template, link_replacement) - - return yaml.safe_dump(template) - - -def replace_links_in_template(template_part, link_replacement): - """Replace get_file and type file links in a Heat template - - Scan the template for 'get_file' and 'type' occurrences, and - replace the file paths according to link_replacement - dict. (Key/value in link_replacement are from/to, respectively.) - """ - - def replaced_dict_value(key, value): - if ((key == 'get_file' or key == 'type') and - isinstance(value, str)): - return link_replacement.get(value, value) - return replace_links_in_template(value, link_replacement) - - def replaced_list_value(value): - return replace_links_in_template(value, link_replacement) - - if isinstance(template_part, dict): - return {k: replaced_dict_value(k, v) - for k, v in template_part.items()} - if isinstance(template_part, list): - return list(map(replaced_list_value, template_part)) - return template_part - - -def relative_link_replacement(link_replacement, current_dir): - """Generate a relative version of link_replacement dictionary. - - Get a link_replacement dictionary (where key/value are from/to - respectively), and make the values in that dictionary relative - paths with respect to current_dir. 
- """ - - return {k: os.path.relpath(v, current_dir) - for k, v in link_replacement.items()} - - -def load_environment_directories(directories): - log = logging.getLogger(__name__ + ".load_environment_directories") - - if os.environ.get('TRIPLEO_ENVIRONMENT_DIRECTORY'): - directories.append(os.environ.get('TRIPLEO_ENVIRONMENT_DIRECTORY')) - - environments = [] - for d in directories: - if os.path.exists(d) and d != '.': - log.debug("Environment directory: %s" % d) - for f in sorted(glob.glob(os.path.join(d, '*.yaml'))): - log.debug("Environment directory file: %s" % f) - if os.path.isfile(f): - environments.append(f) - return environments - - -def get_key(stack, needs_pair=False): - """Returns the private key from the local file system. - - Searches for and returns the stack private key. If the key is inaccessible - for any reason, the process will fall back to using the users key. If no - key is found, this method will return None. - - :params stack: name of the stack to use - :type stack: String - - :param needs_pair: Enable key pair search - :type needs_pair: Boolean - - :returns: String || None - """ - - key_files = list() - stack_dir = get_default_working_dir(stack) - key_files.append(os.path.join(stack_dir, 'ssh_private_key')) - user_dir = os.path.join(constants.CLOUD_HOME_DIR, '.ssh') - key_files.append(os.path.join(user_dir, 'id_rsa_tripleo')) - key_files.append(os.path.join(user_dir, 'id_rsa')) - legacy_dir = os.path.join(constants.DEFAULT_WORK_DIR, '.ssh') - key_files.append(os.path.join(legacy_dir, 'tripleo-admin-rsa')) - for key_file in key_files: - try: - if os.path.exists(key_file): - if needs_pair: - if not os.path.exists('{}.pub'.format(key_file)): - continue - with open(key_file): - return key_file - except IOError: - pass - else: - return - - -def get_tripleo_ansible_inventory(inventory_file=None, - ssh_user='tripleo-admin', - stack='overcloud', - undercloud_connection='ssh', - return_inventory_file_path=False): - if not inventory_file: - inventory_file = os.path.join( - constants.CLOUD_HOME_DIR, - 'tripleo-ansible-inventory.yaml' - ) - - command = ['/usr/bin/tripleo-ansible-inventory', - '--os-cloud', 'undercloud'] - if stack: - command.extend(['--stack', stack]) - command.extend(['--undercloud-key-file', get_key(stack=stack)]) - if ssh_user: - command.extend(['--ansible_ssh_user', ssh_user]) - if undercloud_connection: - command.extend(['--undercloud-connection', - undercloud_connection]) - if inventory_file: - command.extend(['--static-yaml-inventory', inventory_file]) - rc = run_command_and_log(LOG, command) - if rc != 0: - message = "Failed to generate inventory" - raise exceptions.InvalidConfiguration(message) - if os.path.exists(inventory_file): - if return_inventory_file_path: - return inventory_file - - with open(inventory_file, "r") as f: - inventory = f.read() - return inventory - - raise exceptions.InvalidConfiguration(_( - "Inventory file %s can not be found.") % inventory_file) - - -def cleanup_tripleo_ansible_inventory_file(path): - """Remove the static tripleo-ansible-inventory file from disk""" - if os.path.exists(path): - processutils.execute('/usr/bin/rm', '-f', path) - - -def get_roles_file_path(working_dir, stack_name): - roles_file = os.path.join( - working_dir, - constants.WD_DEFAULT_ROLES_FILE_NAME.format(stack_name)) - - return roles_file - - -def get_networks_file_path(working_dir, stack_name): - networks_file = os.path.join( - working_dir, - constants.WD_DEFAULT_NETWORKS_FILE_NAME.format(stack_name)) - - return networks_file - - -def 
get_baremetal_file_path(working_dir, stack_name): - baremetal_file_name = os.path.join( - working_dir, - constants.WD_DEFAULT_BAREMETAL_FILE_NAME.format(stack_name)) - baremetal_file = (baremetal_file_name - if os.path.exists(baremetal_file_name) else None) - - return baremetal_file - - -def get_vip_file_path(working_dir, stack_name): - vip_file = os.path.join( - working_dir, - constants.WD_DEFAULT_VIP_FILE_NAME.format(stack_name)) - - return vip_file - - -def rewrite_ansible_playbook_paths(src, dest): - """Rewrite relative paths to playbooks in the dest roles file, so that - the path is the absolute path relative to the src roles file - """ - with open(dest, 'r') as f: - wd_roles = yaml.safe_load(f.read()) - for role_idx, role in enumerate(wd_roles): - for pb_idx, pb_def in enumerate(role.get('ansible_playbooks', [])): - path = rel_or_abs_path_role_playbook(os.path.dirname(src), - pb_def['playbook']) - wd_roles[role_idx]['ansible_playbooks'][pb_idx][ - 'playbook'] = path - with open(dest, 'w') as f: - f.write(yaml.safe_dump(wd_roles)) - - -def copy_to_wd(working_dir, file, stack, kind): - src = os.path.abspath(file) - dest = os.path.join(working_dir, - constants.KIND_TEMPLATES[kind].format(stack)) - shutil.copy(src, dest) - if kind == 'baremetal': - rewrite_ansible_playbook_paths(src, dest) - - -def update_working_dir_defaults(working_dir, args): - stack_name = args.stack - tht_root = os.path.abspath(args.templates) - - if isinstance(args.baremetal_deployment, str): - copy_to_wd(working_dir, args.baremetal_deployment, stack_name, - 'baremetal') - - if args.roles_file: - copy_to_wd(working_dir, args.roles_file, stack_name, 'roles') - elif not os.path.exists( - os.path.join( - working_dir, - constants.WD_DEFAULT_ROLES_FILE_NAME.format(stack_name))): - file = os.path.join(tht_root, constants.OVERCLOUD_ROLES_FILE) - copy_to_wd(working_dir, file, stack_name, 'roles') - - if args.networks_file: - copy_to_wd(working_dir, args.networks_file, args.stack, 'networks') - elif not os.path.exists( - os.path.join( - working_dir, - constants.WD_DEFAULT_NETWORKS_FILE_NAME.format(stack_name))): - file = os.path.join(tht_root, constants.OVERCLOUD_NETWORKS_FILE) - copy_to_wd(working_dir, file, stack_name, 'networks') - - if args.vip_file: - copy_to_wd(working_dir, args.vip_file, args.stack, 'vips') - elif not os.path.exists( - os.path.join( - working_dir, - constants.WD_DEFAULT_VIP_FILE_NAME.format(stack_name))): - file = os.path.join(tht_root, constants.OVERCLOUD_VIP_FILE) - copy_to_wd(working_dir, file, stack_name, 'vips') - - -def build_stack_data(clients, stack_name, template, - files, env_files): - orchestration_client = clients.orchestration - fields = { - 'template': template, - 'files': files, - 'environment_files': env_files, - 'show_nested': True - } - stack_data = {} - result = orchestration_client.stacks.validate(**fields) - - if result: - stack_data['environment_parameters'] = result.get( - 'Environment', {}).get('parameter_defaults') - flattened = {'resources': {}, 'parameters': {}} - stack_utils._flat_it(flattened, 'Root', result) - stack_data['heat_resource_tree'] = flattened - - return stack_data - - -def archive_deploy_artifacts(log, stack_name, working_dir, ansible_dir=None): - """Create a tarball of the temporary folders used""" - log.debug(_("Preserving deployment artifacts")) - - def get_tar_filename(): - return os.path.join( - working_dir, '%s-install-%s.tar.bzip2' % - (stack_name, - datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S'))) - - def tar_filter(info): - """Tar filter to 
remove output dir from path""" - if info.name.endswith('.bzip2'): - return None - leading_path = working_dir[1:] + '/' - info.name = info.name.replace(leading_path, '') - return info - - tar_filename = get_tar_filename() - try: - tf = tarfile.open(tar_filename, 'w:bz2') - tf.add(working_dir, recursive=True, filter=tar_filter) - if ansible_dir: - tf.add(ansible_dir, recursive=True, - filter=tar_filter) - tf.close() - except tarfile.TarError as ex: - msg = _("Unable to create artifact tarball, %s") % str(ex) - log.warning(msg) - return tar_filename - - -def jinja_render_files(log, templates, working_dir, - roles_file=None, networks_file=None, - base_path=None, output_dir=None): - python_version = sys.version_info[0] - python_cmd = "python{}".format(python_version) - process_templates = os.path.join( - templates, 'tools/process-templates.py') - args = [python_cmd, process_templates] - args.extend(['--roles-data', roles_file]) - args.extend(['--network-data', networks_file]) - - if base_path: - args.extend(['-p', base_path]) - - if output_dir: - args.extend(['-o', output_dir]) - - if run_command_and_log(log, args, working_dir) != 0: - msg = _("Problems generating templates.") - log.error(msg) - raise exceptions.DeploymentError(msg) - - -def rewrite_env_path(env_path, tht_root, user_tht_root, log=None): - abs_env_path = os.path.abspath(env_path) - if (abs_env_path.startswith(user_tht_root) - and ((user_tht_root + '/') in env_path - or (user_tht_root + '/') in abs_env_path - or user_tht_root == abs_env_path - or user_tht_root == env_path)): - new_env_path = env_path.replace(user_tht_root + '/', tht_root + '/') - if log: - log.debug("Redirecting env file %s to %s" - % (abs_env_path, new_env_path)) - env_path = new_env_path - - return env_path, abs_env_path - - -def process_multiple_environments(created_env_files, tht_root, - user_tht_root, - env_files_tracker=None, - cleanup=True): - log = logging.getLogger(__name__ + ".process_multiple_environments") - env_files = {} - localenv = {} - include_env_in_files = env_files_tracker is not None - # Normalize paths for full match checks - user_tht_root = os.path.normpath(user_tht_root) - tht_root = os.path.normpath(tht_root) - for env_path in created_env_files: - log.debug("Processing environment files %s" % env_path) - env_path, abs_env_path = rewrite_env_path(env_path, tht_root, - user_tht_root, log=log) - try: - files, env = template_utils.process_environment_and_files( - env_path=env_path, include_env_in_files=include_env_in_files) - if env_files_tracker is not None: - env_files_tracker.append( - heat_utils.normalise_file_path_to_url(env_path)) - except hc_exc.CommandError as ex: - # This provides fallback logic so that we can reference files - # inside the resource_registry values that may be rendered via - # j2.yaml templates, where the above will fail because the - # file doesn't exist in user_tht_root, but it is in tht_root - # See bug https://bugs.launchpad.net/tripleo/+bug/1625783 - # for details on why this is needed (backwards-compatibility) - log.debug("Error %s processing environment file %s" - % (str(ex), env_path)) - # Use the temporary path as it's possible the environment - # itself was rendered via jinja. - with open(env_path, 'r') as f: - env_map = yaml.safe_load(f) - env_registry = env_map.get('resource_registry', {}) - env_dirname = os.path.dirname(os.path.abspath(env_path)) - for rsrc, rsrc_path in env_registry.items(): - # We need to calculate the absolute path relative to - # env_path not cwd (which is what abspath uses). 
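- # For example (paths illustrative): with env_path
- # '/home/stack/tht/env.yaml' and rsrc_path '../net/config.yaml',
- # this yields '/home/stack/net/config.yaml' regardless of the
- # caller's current directory.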
- abs_rsrc_path = os.path.normpath( - os.path.join(env_dirname, rsrc_path)) - # If the absolute path matches user_tht_root, rewrite - # a temporary environment pointing at tht_root instead - if (abs_rsrc_path.startswith(user_tht_root) and - ((user_tht_root + '/') in abs_rsrc_path or - abs_rsrc_path == user_tht_root)): - new_rsrc_path = abs_rsrc_path.replace( - user_tht_root + '/', tht_root + '/') - log.debug("Rewriting %s %s path to %s" - % (env_path, rsrc, new_rsrc_path)) - env_registry[rsrc] = new_rsrc_path - else: - # Skip any resources that are mapping to OS::* - # resource names as these aren't paths - if not rsrc_path.startswith("OS::"): - env_registry[rsrc] = abs_rsrc_path - env_map['resource_registry'] = env_registry - f_name = os.path.basename(os.path.splitext(abs_env_path)[0]) - with tempfile.NamedTemporaryFile(dir=tht_root, - prefix="env-%s-" % f_name, - suffix=".yaml", - mode="w", - delete=cleanup) as f: - log.debug("Rewriting %s environment to %s" - % (env_path, f.name)) - f.write(yaml.safe_dump(env_map, default_flow_style=False)) - f.flush() - files, env = template_utils.process_environment_and_files( - env_path=f.name, include_env_in_files=include_env_in_files) - if env_files_tracker is not None: - env_files_tracker.append( - heat_utils.normalise_file_path_to_url(f.name)) - if files: - log.debug("Adding files %s for %s" % (files, env_path)) - env_files.update(files) - - # 'env' can be a deeply nested dictionary, so a simple update is - # not enough - localenv = template_utils.deep_update(localenv, env) - return env_files, localenv - - -def parse_extra_vars(extra_var_strings): - """Parses extra variables like Ansible would. - - Each element in extra_var_strings is like the raw value of -e - parameter of ansible-playbook command. It can either be very - simple 'key=val key2=val2' format or it can be '{ ... }' - representing a YAML/JSON object. - - The 'key=val key2=val2' format gets processed as if it was - '{"key": "val", "key2": "val2"}' object, and all YAML/JSON objects - get shallow-merged together in the order as they appear in - extra_var_strings, latter objects taking precedence over earlier - ones. - - :param extra_var_strings: unparsed value(s) of -e parameter(s) - :type extra_var_strings: list of strings - - :returns dict representing a merged object of all extra vars - """ - result = {} - - for extra_var_string in extra_var_strings: - invalid_yaml = False - - try: - parse_vars = yaml.safe_load(extra_var_string) - except yaml.YAMLError: - invalid_yaml = True - - if invalid_yaml or not isinstance(parse_vars, dict): - try: - parse_vars = dict( - item.split('=') for item in extra_var_string.split()) - except ValueError: - raise ValueError( - 'Invalid format for {extra_var_string}'.format( - extra_var_string=extra_var_string)) - - result.update(parse_vars) - - return result - - -def prepend_environment(environment_files, templates_dir, environment): - if not environment_files: - environment_files = [] - - full_path = os.path.join(templates_dir, environment) - # sanity check it exists before proceeding - if os.path.exists(full_path): - # We need to prepend before the files provided by user. 
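- # e.g. (values illustrative): with environment_files=['custom.yaml']
- # and full_path='/tht/overcloud-resource-registry-puppet.yaml', the
- # result is ['/tht/overcloud-resource-registry-puppet.yaml',
- # 'custom.yaml'].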
- environment_files.insert(0, full_path) - else: - raise exceptions.InvalidConfiguration(_( - "Expected environment file {0} not found in {1}, cannot proceed.") - .format(environment, templates_dir)) - - return environment_files - - -def get_hostname(short=False): - """Returns the local hostname - - :param (short): boolean true to run 'hostname -s' - :return string - """ - if short: - cmd = ["hostname", "-s"] - else: - cmd = ["hostname"] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - universal_newlines=True) - return p.communicate()[0].rstrip().lower() - - -def get_short_hostname(): - """Returns the local short hostname - - :return string - """ - return get_hostname(short=True) - - -def wait_api_port_ready(api_port, host='127.0.0.1'): - """Wait until an HTTP service becomes available - - :param api_port: api service port - :type api_port: integer - - :param host: host running the service (default: 127.0.0.1) - :type host: string - - :return boolean - """ - log = logging.getLogger(__name__ + ".wait_api_port_ready") - urlopen_timeout = 1 - max_retries = 30 - count = 0 - while count < max_retries: - time.sleep(1) - count += 1 - try: - request.urlopen( - "http://%s:%s/" % (host, api_port), timeout=urlopen_timeout) - return False - except url_error.HTTPError as he: - if he.code == 300: - return True - pass - except url_error.URLError: - pass - except socket.timeout: - log.warning( - "Timeout at attempt {} of {} after {}s waiting for API port..." - .format(count, max_retries, urlopen_timeout)) - pass - raise RuntimeError( - "wait_api_port_ready: Max retries {} reached".format(max_retries)) - - -def bulk_symlink(log, src, dst, tmpd='/tmp'): - """Create bulk symlinks from a directory - - :param log: logger instance for logging - :type log: Logger - - :param src: dir of directories to symlink - :type src: string - - :param dst: dir to create the symlinks - :type dst: string - - :param tmpd: temporary working directory to use - :type tmpd: string - """ - log.debug("Symlinking %s to %s, via temp dir %s" % - (src, dst, tmpd)) - - makedirs(dst) - with TempDirs(dir_path=tmpd) as tmp: - for obj in os.listdir(src): - if not os.path.exists(os.path.join(dst, obj)): - tmpf = os.path.join(tmp, obj) - os.symlink(os.path.join(src, obj), tmpf) - os.rename(tmpf, os.path.join(dst, obj)) - - -def run_command_and_log(log, cmd, cwd=None, env=None): - """Run command and log output - - :param log: logger instance for logging - :type log: Logger - - :param cmd: command in list form - :type cmd: List - - :param cwd: current working directory for execution - :type cwd: String - - :param env: modified environment for command run - :type env: Dict - """ - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=False, - cwd=cwd, env=env) - while True: - try: - line = proc.stdout.readline() - except StopIteration: - break - if line != b'': - if isinstance(line, bytes): - line = line.decode('utf-8') - log.warning(line.rstrip()) - else: - break - proc.stdout.close() - return proc.wait() - - -def build_prepare_env(environment_files, environment_directories): - '''Build the environment for container image prepare - - :param environment_files: List of environment files to build - environment from - :type environment_files: list - - :param environment_directories: List of environment directories to build - environment from - :type environment_directories: list - ''' - env_files = [] - - if environment_directories: - env_files.extend(load_environment_directories( - environment_directories)) - if 
environment_files: - env_files.extend(environment_files) - - def get_env_file(method, path): - if not os.path.exists(path): - return '{}' - env_url = heat_utils.normalise_file_path_to_url(path) - return request.urlopen(env_url).read() - - return ( - template_utils.process_multiple_environments_and_files( - env_files, - env_path_is_object=lambda path: True, - object_request=get_env_file - ) - )[1] - - -def rel_or_abs_path(file_path, tht_root): - '''Find a file, either absolute path or relative to the t-h-t dir''' - if not file_path: - return None - path = os.path.abspath(file_path) - if not os.path.isfile(path): - path = os.path.abspath(os.path.join(tht_root, file_path)) - if not os.path.isfile(path): - raise exceptions.DeploymentError( - "Can't find path %s %s" % (file_path, path)) - return path - - -def fetch_roles_file(roles_file, tht_path=constants.TRIPLEO_HEAT_TEMPLATES): - '''Fetch t-h-t roles data from roles_file abs path or rel to tht_path.''' - if not roles_file: - return None - with open(rel_or_abs_path(roles_file, tht_path)) as f: - return yaml.safe_load(f) - - -def load_config(osloconf, path): - '''Load oslo config from a file path. ''' - log = logging.getLogger(__name__ + ".load_config") - conf_params = [] - if os.path.isfile(path): - conf_params += ['--config-file', path] - else: - log.warning(_('%s does not exist. Using defaults.') % path) - osloconf(conf_params) - - -def configure_logging(log, level, log_file): - '''Mimic oslo_log default levels and formatting for the logger. ''' - fhandler = logging.FileHandler(log_file) - formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [ ] %(message)s', - '%Y-%m-%d %H:%M:%S') - - if level > 1: - log.setLevel(logging.DEBUG) - fhandler.setLevel(logging.DEBUG) - else: - # NOTE(bogdando): we are making an exception to the oslo_log'ish - # default WARN level to have INFO logs as well. Some modules - # produce INFO msgs we want to see and keep by default, like - # pre-flight validation notes. - log.setLevel(logging.INFO) - fhandler.setLevel(logging.INFO) - - fhandler.setFormatter(formatter) - log.addHandler(fhandler) - - -def _name_helper(basename, arch=None, platform=None, use_subdir=False): - # NOTE(tonyb): We don't accept a platform with an arch. This is caught - # when importing the nodes / processing args, but let's be a little - # cautious here anyway. 
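- # For example (values illustrative):
- #   _name_helper('agent', arch='x86_64', use_subdir=True)
- #     -> 'x86_64/agent'
- #   _name_helper('overcloud-full', arch='ppc64le', platform='p9')
- #     -> 'p9-ppc64le-overcloud-full'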
- if use_subdir: - delim = '/' - else: - delim = '-' - - if arch and platform: - basename = platform + '-' + arch + delim + basename - elif arch: - basename = arch + delim + basename - return basename - - -def overcloud_kernel(basename, arch=None, platform=None, - use_subdir=False): - return (_name_helper('%s-vmlinuz' % basename, arch=arch, - platform=platform, use_subdir=use_subdir), - '.vmlinuz') - - -def overcloud_ramdisk(basename, arch=None, platform=None, - use_subdir=False): - return (_name_helper('%s-initrd' % basename, arch=arch, - platform=platform, use_subdir=use_subdir), - '.initrd') - - -def overcloud_image(basename, arch=None, platform=None, - use_subdir=False): - return (_name_helper(basename, arch=arch, platform=platform, - use_subdir=use_subdir), - '.raw') - - -def deploy_kernel(basename='agent', arch=None, platform=None, - use_subdir=True): - return _name_helper(basename, arch=arch, platform=platform, - use_subdir=use_subdir) + '.kernel' - - -def deploy_ramdisk(basename='agent', arch=None, platform=None, - use_subdir=True): - return _name_helper(basename, arch=arch, platform=platform, - use_subdir=use_subdir) + '.ramdisk' - - -def _candidate_files(node, call): - arch = node.get('arch') - platform = node.get('platform') - - if arch: - if platform: - yield call(arch=arch, platform=platform) - yield call(arch=arch) - - yield call() - - -def update_nodes_deploy_data(nodes, - http_boot=constants.IRONIC_HTTP_BOOT_BIND_MOUNT): - """Add specific kernel and ramdisk IDs to a node. - - Look at all images and update node data with the most specific - deploy_kernel and deploy_ramdisk for the architecture/platform combination. - """ - for node in nodes: - - # NOTE(tonyb): Check to see if we have a specific kernel for this node - # and use that. Fall back to the generic image. - if 'kernel_id' not in node: - kernel_locations = list(_candidate_files(node, deploy_kernel)) - - for kernel in kernel_locations: - path = os.path.join(http_boot, kernel) - if os.path.exists(path): - node['kernel_id'] = 'file://%s/%s' % ( - http_boot, - kernel) - break - else: - raise RuntimeError('No kernel image provided and none of %s ' - 'found in %s' % (kernel_locations, - http_boot)) - - # NOTE(tonyb): As above except for ramdisks - if 'ramdisk_id' not in node: - ramdisk_locations = list(_candidate_files(node, deploy_ramdisk)) - - for ramdisk in ramdisk_locations: - path = os.path.join(http_boot, ramdisk) - if os.path.exists(path): - node['ramdisk_id'] = 'file://%s/%s' % ( - http_boot, - ramdisk) - break - else: - raise RuntimeError('No ramdisk image provided and none of %s ' - 'found in %s' % (ramdisk_locations, - http_boot)) - - -def get_deployment_python_interpreter(parsed_args): - """Return correct deployment python interpreter """ - if parsed_args.deployment_python_interpreter: - return parsed_args.deployment_python_interpreter - return sys.executable - - -def run_command(args, env=None, name=None, logger=None): - """Run the command defined by args and return its output - - :param args: List of arguments for the command to be run. - :param env: Dict defining the environment variables. Pass None to use - the current environment. - :param name: User-friendly name for the command being run. A value of - None will cause args[0] to be used. 
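- 
-     Example (the command and friendly name are illustrative; this is
-     the same form check_hostname() below uses):
- 
-         output = run_command(['hostnamectl', '--static'],
-                              name='hostnamectl')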
- """ - if logger is None: - logger = LOG - if name is None: - name = args[0] - try: - output = subprocess.check_output(args, - stderr=subprocess.STDOUT, - env=env) - if isinstance(output, bytes): - output = output.decode('utf-8') - return output - except subprocess.CalledProcessError as e: - message = '%s failed: %s' % (name, e.output) - logger.error(message) - raise RuntimeError(message) - - -def set_hostname(hostname): - """Set system hostname to provided hostname - - :param hostname: The hostname to set - """ - args = ['sudo', 'hostnamectl', 'set-hostname', hostname] - return run_command(args, name='hostnamectl') - - -def check_hostname(fix_etc_hosts=True, logger=None): - """Check system hostname configuration - - Rabbit and Puppet require pretty specific hostname configuration. This - function ensures that the system hostname settings are valid before - continuing with the installation. - - :param fix_etc_hosts: Boolean to to enable adding hostname to /etc/hosts - if not found. - """ - if logger is None: - logger = LOG - logger.info('Checking for a FQDN hostname...') - args = ['hostnamectl', '--static'] - detected_static_hostname = run_command(args, name='hostnamectl').rstrip() - logger.info('Static hostname detected as %s', detected_static_hostname) - args = ['hostnamectl', '--transient'] - detected_transient_hostname = run_command(args, - name='hostnamectl').rstrip() - logger.info('Transient hostname detected as %s', - detected_transient_hostname) - if detected_static_hostname != detected_transient_hostname: - logger.error('Static hostname "%s" does not match transient hostname ' - '"%s".', detected_static_hostname, - detected_transient_hostname) - logger.error('Use hostnamectl to set matching hostnames.') - raise RuntimeError('Static and transient hostnames do not match') - short_hostname = detected_static_hostname.split('.')[0] - if short_hostname == detected_static_hostname: - message = _('Configured hostname is not fully qualified.') - logger.error(message) - raise RuntimeError(message) - with open('/etc/hosts') as hosts_file: - for line in hosts_file: - # check if hostname is in /etc/hosts - if (not line.lstrip().startswith('#') and - detected_static_hostname in line.split()): - break - else: - # hostname not found, add it to /etc/hosts - if not fix_etc_hosts: - return - sed_cmd = (r'sed -i "s/127.0.0.1\(\s*\)/127.0.0.1\\1%s %s /" ' - '/etc/hosts' % - (detected_static_hostname, short_hostname)) - args = ['sudo', '/bin/bash', '-c', sed_cmd] - run_command(args, name='hostname-to-etc-hosts') - logger.info('Added hostname %s to /etc/hosts', - detected_static_hostname) - - -def check_env_for_proxy(no_proxy_hosts=None): - """Check env proxy settings - - :param no_proxy_hosts: array of hosts to check if in no_proxy env var - """ - if no_proxy_hosts is None: - no_proxy_hosts = ['127.0.0.1'] - http_proxy = os.environ.get('http_proxy') - https_proxy = os.environ.get('https_proxy') - if os.environ.get('no_proxy'): - no_proxy = os.environ.get('no_proxy').split(',') - else: - no_proxy = [] - missing_hosts = [] - if http_proxy or https_proxy: - missing_hosts = set(no_proxy_hosts) - set(no_proxy) - if missing_hosts: - message = _('http_proxy or https_proxy is set but the following local ' - 'addresses "{}" may be missing from the no_proxy ' - 'environment variable').format(','.join(missing_hosts)) - raise RuntimeError(message) - - -def get_read_config(cfg): - """Return the config read from ini config file(s)""" - config = configparser.ConfigParser() - config.read(cfg) - return config - - -def 
getboolean_from_cfg(cfg, param, section="DEFAULT"): - """Return a parameter from Kolla config""" - return _get_from_cfg(cfg, cfg.getboolean, param, section) - - -def get_from_cfg(cfg, param, section="DEFAULT"): - """Return a parameter from Kolla config""" - return _get_from_cfg(cfg, cfg.get, param, section) - - -def _get_from_cfg(cfg, accessor, param, section): - """Return a parameter from Kolla config""" - try: - val = accessor(section, param) - except (ValueError, configparser.Error): - raise exceptions.NotFound(_("Unable to find {section}/{option} in " - "{config}").format(section=section, - option=param, - config=cfg)) - return val - - -def get_local_timezone(): - info = run_command(['timedatectl'], name='timedatectl') - timezoneline = [tz for tz in info.split('\n') if 'Time zone:' in tz] - if not timezoneline: - LOG.warning('Unable to determine timezone, using UTC') - return 'UTC' - # The line returned is "[whitespace]Time zone: [timezone] ([tz], [offset])" - try: - timezone = timezoneline[0].strip().split(' ')[2] - except IndexError: - LOG.error('Unable to parse timezone from timedatectl, using UTC') - timezone = 'UTC' - return timezone - - -def check_file_for_enabled_service(env_file): - """Checks an environment file for enabled deprecated services. - - If the stack to be deployed/updated/upgraded has any deprecated service - enabled, throw a warning about its deprecation and ask the user - whether to proceed with deployment despite deprecation. - For ODL as an example: - - :param env_file: The path of the environment file - :type env_file: String - - :raises CommandError: If the action is not confirmed - """ - if os.path.exists(env_file): - with open(env_file, "r") as f: - content = yaml.safe_load(f) - deprecated_services_enabled = [] - for service in constants.DEPRECATED_SERVICES.keys(): - try: - if content["resource_registry"][service] != "OS::Heat::None": - LOG.warning("service " + service + " is enabled in " - + str(env_file) + ". " + - constants.DEPRECATED_SERVICES[service]) - deprecated_services_enabled.append(service) - except (KeyError, TypeError): - # ignore if content["resource_registry"] is empty - pass - if deprecated_services_enabled: - confirm = prompt_user_for_confirmation( - message="Do you still wish to continue with deployment [y/N]", - logger=LOG) - if not confirm: - raise oscexc.CommandError("Action not confirmed, exiting.") - - -def check_deprecated_service_is_enabled(environment_files): - for env_file in environment_files: - check_file_for_enabled_service(env_file) - - -def reset_cmdline(): - """Run reset to clean up the command line""" - # only try to reset if stdout is a terminal, skip if not (e.g. CI) - if not sys.stdout.isatty(): - return - output = '' - try: - output = run_command(['reset', '-I']) - except RuntimeError: - LOG.warning('Unable to reset command line. Try manually running ' - '"reset" if the command line is broken.') - sys.stdout.write(output) - sys.stdout.flush() - - -def safe_write(path, data): - '''Write to disk and exit safely if it cannot write correctly.''' - log = logging.getLogger(__name__ + ".safe_write") - - if os.path.exists(path): - log.warning( - "The output file %s will be overridden", - path - ) - - try: - data = data.decode('utf-8', 'ignore') - except (UnicodeDecodeError, AttributeError): - pass - - try: - with os.fdopen(os.open(path, - os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o666), - 'w') as f: - f.write(data) - except OSError as error: - if error.errno != errno.EEXIST: - msg = _( - 'The output file {file} cannot be created. 
Error: {msg}' - ).format(file=path, msg=str(error)) - raise oscexc.CommandError(msg) - - -def copy_clouds_yaml(user): - """Copy clouds.yaml file from /etc/openstack to deployment user's home - - :param user: deployment user - """ - clouds_etc_file = '/etc/openstack/clouds.yaml' - clouds_home_dir = os.path.expanduser("~{}".format(user)) - clouds_config_dir = os.path.join(clouds_home_dir, '.config') - clouds_openstack_config_dir = os.path.join(clouds_config_dir, - 'openstack') - clouds_config_file = os.path.join(clouds_openstack_config_dir, - 'clouds.yaml') - clouds_user_id = os.stat(clouds_home_dir).st_uid - clouds_group_id = os.stat(clouds_home_dir).st_gid - - # If the file doesn't exist, we don't need to copy - # /etc/openstack/clouds.yaml to the user directory. - if not os.path.isfile(clouds_etc_file): - return - - if not os.path.exists(clouds_openstack_config_dir): - try: - os.makedirs(clouds_openstack_config_dir) - except OSError as e: - messages = _("Unable to create credentials directory: " - "{0}, {1}").format(clouds_openstack_config_dir, e) - raise OSError(messages) - - # Using 'sudo' here as for the overcloud the deployment command is run - # from regular deployment user. - cp_args = ['sudo', 'cp', clouds_etc_file, clouds_openstack_config_dir] - if run_command_and_log(LOG, cp_args) != 0: - msg = _('Error when user %(user)s tried to copy %(src)s to %(dest)s' - ' with sudo') % {'user': user, 'src': clouds_etc_file, - 'dest': clouds_openstack_config_dir} - LOG.error(msg) - raise exceptions.DeploymentError(msg) - chmod_args = ['sudo', 'chmod', '0600', clouds_config_file] - if run_command_and_log(LOG, chmod_args) != 0: - msg = _('Error when user %(user)s tried to chmod %(file)s file' - ' with sudo') % {'user': user, 'file': clouds_config_file} - LOG.error(msg) - raise exceptions.DeploymentError(msg) - chown_args = ['sudo', 'chown', '-R', - str(clouds_user_id) + ':' + str(clouds_group_id), - clouds_config_dir] - if run_command_and_log(LOG, chown_args) != 0: - msg = _('Error when user %(user)s tried to chown %(dir)s directory' - ' with sudo') % {'user': user, 'dir': clouds_config_dir} - LOG.error(msg) - raise exceptions.DeploymentError(msg) - - -def get_status_yaml(stack_name, working_dir): - status_yaml = os.path.join( - working_dir, - '%s-deployment_status.yaml' % stack_name) - return status_yaml - - -def update_deployment_status(stack_name, status, working_dir): - """Update the deployment status.""" - - contents = yaml.safe_dump( - {'deployment_status': status}, - default_flow_style=False) - - safe_write(get_status_yaml(stack_name, working_dir), - contents) - - -def create_breakpoint_cleanup_env(tht_root, stack): - bp_env = {} - update.add_breakpoints_cleanup_into_env(bp_env) - env_path = write_user_environment( - bp_env, - 'tripleoclient-breakpoint-cleanup.yaml', - tht_root, - stack) - return [env_path] - - -def create_parameters_env(parameters, tht_root, stack, - env_file='tripleoclient-parameters.yaml'): - parameter_defaults = {"parameter_defaults": parameters} - env_path = write_user_environment( - parameter_defaults, - env_file, - tht_root, - stack) - return [env_path] - - -def build_user_env_path(abs_env_path, tht_root): - env_dirname = os.path.dirname(abs_env_path) - user_env_dir = os.path.join( - tht_root, 'user-environments', env_dirname[1:]) - user_env_path = os.path.join( - user_env_dir, os.path.basename(abs_env_path)) - makedirs(user_env_dir) - return user_env_path - - -def write_user_environment(env_map, abs_env_path, tht_root, - stack): - # We write the env_map to the 
local /tmp tht_root and also - # to the swift plan container. - contents = yaml.safe_dump(env_map, default_flow_style=False) - user_env_path = build_user_env_path(abs_env_path, tht_root) - LOG.debug("user_env_path=%s" % user_env_path) - with open(user_env_path, 'w') as f: - LOG.debug("Writing user environment %s" % user_env_path) - f.write(contents) - return user_env_path - - -def launch_heat(launcher=None, restore_db=False, heat_type='pod'): - - global _local_orchestration_client - global _heat_pid - - if _local_orchestration_client: - print("returning cached") - return _local_orchestration_client - - if not launcher: - launcher = get_heat_launcher(heat_type) - - _heat_pid = 0 - if launcher.heat_type == 'native': - _heat_pid = os.fork() - if _heat_pid == 0: - launcher.check_database() - launcher.check_message_bus() - launcher.heat_db_sync(restore_db) - launcher.launch_heat() - - # Wait for the API to be listening - heat_api_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - test_heat_api_port(heat_api_socket, launcher.host, int(launcher.api_port)) - if launcher.heat_type == 'pod': - launcher.wait_for_message_queue() - - _local_orchestration_client = tc_heat_utils.local_orchestration_client( - launcher.host, launcher.api_port) - return _local_orchestration_client - - -@retry(stop=(stop_after_delay(10) | stop_after_attempt(10)), - wait=wait_fixed(0.5)) -def test_heat_api_port(heat_api_socket, host, port): - heat_api_socket.connect((host, port)) - - -def get_heat_launcher(heat_type, *args, **kwargs): - if heat_type == 'native': - return heat_launcher.HeatNativeLauncher(*args, **kwargs) - if heat_type == 'container': - return heat_launcher.HeatContainerLauncher(*args, **kwargs) - return heat_launcher.HeatPodLauncher(*args, **kwargs) - - -def kill_heat(launcher): - global _heat_pid - if _heat_pid: - LOG.debug("Attempting to kill heat pid %s" % _heat_pid) - launcher.kill_heat(_heat_pid) - - -def rm_heat(launcher, backup_db=True): - launcher.rm_heat(backup_db) - - -def get_default_working_dir(stack): - return os.path.join( - os.path.expanduser('~'), - "overcloud-deploy", stack) - - -def get_ctlplane_attrs(): - try: - conn = openstack.connect('undercloud') - except openstack.exceptions.ConfigException: - return dict() - - if not conn.endpoint_for('network'): - return dict() - - network = conn.network.find_network('ctlplane') - if network is None: - return dict() - - net_attributes_map = {'network': dict(), 'subnets': dict()} - - net_attributes_map['network'].update({ - 'name': network.name, - 'mtu': network.mtu, - 'dns_domain': network.dns_domain, - 'tags': network.tags, - }) - - for subnet_id in network.subnet_ids: - subnet = conn.network.get_subnet(subnet_id) - net_attributes_map['subnets'].update({ - subnet.name: { - 'name': subnet.name, - 'cidr': subnet.cidr, - 'gateway_ip': subnet.gateway_ip, - 'host_routes': subnet.host_routes, - 'dns_nameservers': subnet.dns_nameservers, - 'ip_version': subnet.ip_version, - } - }) - - return net_attributes_map - - -def cleanup_host_entry(entry): - # remove any tab or space excess - entry_stripped = re.sub('[ \t]+', ' ', str(entry).rstrip()) - # removes any duplicate identical lines - unique_lines = list(set(entry_stripped.splitlines())) - ret = '' - for line in unique_lines: - # remove any duplicate word - hosts_unique = (' '.join( - collections.OrderedDict((w, w) for w in line.split()).keys())) - if hosts_unique != '': - ret += hosts_unique + '\n' - return ret.rstrip('\n') - - -def get_undercloud_host_entry(): - """Get hosts entry for undercloud 
ctlplane network - - The host entry will be added on overcloud nodes - """ - ctlplane_hostname = '.'.join([get_short_hostname(), 'ctlplane']) - cmd = ['getent', 'hosts', ctlplane_hostname] - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, - universal_newlines=True) - out, err = process.communicate() - if process.returncode != 0: - raise exceptions.DeploymentError('No entry for %s in /etc/hosts' - % ctlplane_hostname) - return cleanup_host_entry(out) - - -def get_roles_data(working_dir, stack_name): - abs_roles_file = get_roles_file_path(working_dir, stack_name) - with open(abs_roles_file, 'r') as fp: - roles_data = yaml.safe_load(fp) - - return roles_data - - -def build_enabled_sevices_image_params(env_files, parsed_args, - new_tht_root, user_tht_root, - working_dir): - params = dict() - if parsed_args.environment_directories: - env_files.extend(load_environment_directories( - parsed_args.environment_directories)) - if parsed_args.environment_files: - env_files.extend(parsed_args.environment_files) - - _, env = process_multiple_environments( - env_files, new_tht_root, user_tht_root, - cleanup=(not parsed_args.no_cleanup)) - - roles_data = get_roles_data(working_dir, parsed_args.stack) - - params.update(kolla_builder.get_enabled_services(env, roles_data)) - params.update(plan_utils.default_image_params()) - - if parsed_args.disable_container_prepare: - return params - - params.update( - kolla_builder.container_images_prepare_multi( - env, roles_data, - dry_run=True) - ) - - for role in roles_data: - # NOTE(tkajinam): If a role-specific container image prepare - # parameter is set, run the image prepare process - # with the overridden environment - role_param = '%sContainerImagePrepare' % role['name'] - if env.get('parameter_defaults', {}).get(role_param): - tmp_env = copy.deepcopy(env) - tmp_env['parameter_defaults']['ContainerImagePrepare'] = ( - env['parameter_defaults'][role_param] - ) - - # NOTE(tkajinam): Put the image parameters as role-specific - # parameters - params['%sParameters' % role['name']] = ( - kolla_builder.container_images_prepare_multi( - tmp_env, [role], dry_run=True) - ) - - return params - - -def copy_env_files(files_dict, tht_root): - file_prefix = "file://" - - for full_path in files_dict.keys(): - if not full_path.startswith(file_prefix): - continue - - path = full_path[len(file_prefix):] - - if path.startswith(tht_root): - continue - - relocate_path = os.path.join(tht_root, "user-environments", - os.path.basename(path)) - safe_write(relocate_path, files_dict[full_path]) - - -def is_network_data_v2(networks_file_path): - """Parse the network data, if any network have 'ip_subnet' or - 'ipv6_subnet' keys this is not a network-v2 format file. 
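- 
-     For example (snippet illustrative), an entry like the following
-     makes this function return False:
- 
-         - name: Storage
-           ip_subnet: 172.16.1.0/24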
- - :param networks_file_path: - :return: boolean - """ - with open(networks_file_path, 'r') as f: - network_data = yaml.safe_load(f.read()) - - if isinstance(network_data, list): - for network in network_data: - if 'ip_subnet' in network or 'ipv6_subnet' in network: - return False - - return True - - -def rel_or_abs_path_role_playbook(roles_file_dir, playbook): - if os.path.isabs(playbook): - playbook_path = playbook - else: - # Load for playbook relative to the roles file - playbook_path = os.path.join(roles_file_dir, playbook) - - return playbook_path - - -def validate_roles_playbooks(roles_file_dir, roles): - not_found = [] - playbooks = [] - for role in roles: - playbooks.extend(role.get('ansible_playbooks', [])) - - for x in playbooks: - path = rel_or_abs_path_role_playbook(roles_file_dir, x['playbook']) - if not os.path.exists(path) or not os.path.isfile(path): - not_found.append(path) - - if not_found: - raise exceptions.InvalidPlaybook( - 'Invalid Playbook(s) {}, file(s) not found.'.format( - ', '.join(not_found))) - - -def run_role_playbook(self, inventory, relative_dir, playbook, - limit_hosts=None, extra_vars=dict()): - playbook_path = rel_or_abs_path_role_playbook(relative_dir, playbook) - playbook_dir = os.path.dirname(playbook_path) - - with TempDirs() as tmp: - run_ansible_playbook( - playbook=playbook_path, - inventory=inventory, - workdir=tmp, - playbook_dir=playbook_dir, - verbosity=playbook_verbosity(self=self), - limit_hosts=limit_hosts, - extra_vars=extra_vars, - ) - - -def run_role_playbooks(self, working_dir, roles_file_dir, roles, - network_config=True): - inventory_file = os.path.join(working_dir, - 'tripleo-ansible-inventory.yaml') - with open(inventory_file, 'r') as f: - inventory = yaml.safe_load(f.read()) - - growvols_play = 'cli-overcloud-node-growvols.yaml' - growvols_path = rel_or_abs_path_role_playbook( - constants.ANSIBLE_TRIPLEO_PLAYBOOKS, growvols_play) - - # Pre-Network Config - for role in roles: - if role.get('count', 1) == 0: - continue - - role_playbooks = [] - - for x in role.get('ansible_playbooks', []): - role_playbooks.append(x['playbook']) - - run_role_playbook(self, inventory, roles_file_dir, x['playbook'], - limit_hosts=role['name'], - extra_vars=x.get('extra_vars', {})) - - if growvols_path not in role_playbooks: - # growvols was not run with custom extra_vars, run it with defaults - run_role_playbook(self, inventory, - constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - growvols_play, - limit_hosts=role['name']) - - if network_config: - # Network Config - run_role_playbook(self, inventory, constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - 'cli-overcloud-node-network-config.yaml') - - -def create_archive_dir(archive_dir=constants.TRIPLEO_ARCHIVE_DIR): - """Create the TripleO archive directory as root. The directory is created - in a location typically owned by root (/var/lib), and remains owned as root - to decrease the chance it is accidentally deleted by a normal user. 
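- 
-     Roughly equivalent shell, with the default path supplied by
-     constants.TRIPLEO_ARCHIVE_DIR: sudo mkdir -p <archive_dir>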
- - :param archive_dir: The archive directory to create - :type archive_dir: string - - :return: None - """ - return run_command(['sudo', 'mkdir', '-p', archive_dir]) - - -def extend_protected_overrides(protected_overrides, output_path): - with open(output_path, 'r') as env_file: - data = yaml.safe_load(env_file.read()) - - protect_registry = protected_overrides['registry_entries'] - resource_registry = data.get('resource_registry', {}) - - for reg_entry in resource_registry.keys(): - protect_registry.setdefault(reg_entry, []).append(output_path) - - -def check_prohibited_overrides(protected_overrides, user_environments): - found_conflict = False - protected_registry = protected_overrides['registry_entries'] - msg = ("ERROR: Protected resource registry overrides detected! These " - "entries are used in internal environments and should not be " - "overridden in the user environment. Please remove these overrides " - "from the environment files.\n") - for env_path, abs_env_path in user_environments: - with open(env_path, 'r') as file: - data = yaml.safe_load(file.read()) - - _resource_registry = data.get('resource_registry') - if isinstance(_resource_registry, dict): - registry = set(_resource_registry.keys()) - else: - registry = set() - - conflicts = set(protected_registry.keys()).intersection(registry) - if not conflicts: - continue - - found_conflict = True - for x in conflicts: - msg += ("Conflict detected for resource_registry entry: {}.\n" - "\tUser environment: {}.\n" - "\tInternal environment: {}\n").format( - x, abs_env_path, protected_registry[x]) - - if found_conflict: - raise exceptions.DeploymentError(msg) - - -def parse_container_image_prepare(tht_key='ContainerImagePrepare', - keys=[], source=None, - push_sub_keys=[]): - """Extracts key/value pairs from a list of keys in the source file. - If keys=[foo, bar] and source is the following, - then return {foo: 1, bar: 2} - - parameter_defaults: - ContainerImagePrepare: - - tag_from_label: grault - push_destination: quux.com - set: - foo: 1 - bar: 2 - namespace: quay.io/garply - ContainerImageRegistryCredentials: - 'quay.io': {'quay_username': 'quay_password'} - - If push_destination tag is present as above and push_sub_keys - contains 'namespace', then the returned dictionary d will - contain d['namespace'] = 'quux.com/garply'. - - Alternatively, if tht_key='ContainerImageRegistryCredentials' and - keys=['quay.io/garply'] for the above, then return the following: - - {'registry_url': 'quay.io', - 'registry_username': 'quay_username', - 'registry_password': 'quay_password'} - - If the tht_key is not found, return an empty dictionary. - - :param tht_key: string of a THT parameter (only 2 options) - :param keys: list of keys to extract - :param source: (string) path to container_image_prepare_defaults.yaml - :param push_sub_keys: list of keys to have substitutions if push_destination - is set - - :return: dictionary - """ - image_map = {} - if source is None: - source = kolla_builder.DEFAULT_PREPARE_FILE - if not os.path.exists(source): - raise RuntimeError( - "Path to container image prepare defaults file " - "not found: %s." 
% os.path.abspath(source)) - with open(source, 'r') as stream: - try: - images = yaml.safe_load(stream) - except yaml.YAMLError as exc: - raise RuntimeError( - "yaml.safe_load(%s) returned '%s'" % (source, exc)) - - if tht_key == 'ContainerImagePrepare': - try: - push = '' - tag_list = images['parameter_defaults'][tht_key] - for key in keys: - for tag in tag_list: - if 'push_destination' in tag: - # substitute discovered registry - # if push_destination is set to true - if isinstance(tag['push_destination'], bool) and \ - tag['push_destination']: - push = image_uploader.get_undercloud_registry() - if len(push_sub_keys) > 0: - image_map['push_destination_boolean'] = True - elif isinstance(tag['push_destination'], str): - push = tag['push_destination'] - if len(push_sub_keys) > 0: - image_map['push_destination_boolean'] = True - elif len(push_sub_keys) > 0: - image_map['push_destination_boolean'] = False - if 'set' in tag: - if key in tag['set']: - image_map[key] = tag['set'][key] - if len(push) > 0 and key in push_sub_keys: - # replace the host portion of the imagename - # with the push_destination, since that is - # where they will be uploaded to - image = image_map[key].partition('/')[2] - image_map[key] = os.path.normpath( - os.path.join(push, image)) - except KeyError: - raise RuntimeError( - "The expected parameter_defaults and %s are not " - "defined in data file: %s" % (tht_key, source)) - elif tht_key == 'ContainerImageRegistryCredentials': - try: - tag_list = images['parameter_defaults'][tht_key] - for key in keys: - for tag in tag_list: - registry = url_parse.urlparse(key).netloc - if len(registry) == 0: - registry = url_parse.urlparse('//' + key).netloc - if tag == registry: - if isinstance(tag_list[registry], - collections.abc.Mapping): - credentials = tag_list[registry].popitem() - image_map['registry_username'] = credentials[0] - image_map['registry_password'] = credentials[1] - image_map['registry_url'] = registry - except KeyError: - LOG.info("Unable to parse %s from %s. " - "Assuming the container registry does not " - "require authentication or that the " - "registry URL, username and password " - "will be passed another way." - % (tht_key, source)) - else: - raise RuntimeError("Unsupported tht_key: %s" % tht_key) - return image_map - - -def get_parameter_file(path): - """Retrieve parameter json file from the supplied path. - If the file doesn't exist, or if the decoding fails, log the failure - and return `None`. - :param path: path to the parameter file - :dtype path: `string` - """ - file_data = None - if os.path.exists(path): - with open(path, 'r') as parameter_file: - try: - file_data = json.load(parameter_file) - except (TypeError, json.JSONDecodeError) as e: - LOG.error( - _('Could not read file %s') % path) - LOG.error(e) - else: - LOG.warning('File %s was not found during export' % - path) - return file_data - - -def parse_ansible_inventory(inventory_file): - """ Retrieve a list of hosts from a defined ansible inventory file. 
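- 
-     Example (path and resulting host names illustrative):
- 
-         hosts = parse_ansible_inventory('/home/stack/inventory.yaml')
-         # e.g. ['undercloud', 'overcloud-controller-0']
- 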
- :param inventory_file: Ansible inventory file - :return: list of strings: names of hosts in the inventory - """ - - json_inv, err = ansible_runner.interface.get_inventory('list', - [inventory_file], - quiet=True) - if err: - msg = 'Error parsing inventory {}:\n{}'.format(inventory_file, err) - raise ansible_runner.exceptions.AnsibleRunnerException(msg) - - inventory = json.loads(json_inv) - hosts = list(inventory['_meta']['hostvars'].keys()) - - return hosts - - -def save_stack(stack, working_dir): - if not stack: - return - outputs_dir = os.path.join(working_dir, 'outputs') - makedirs(outputs_dir) - for output in constants.STACK_OUTPUTS: - val = get_stack_output_item(stack, output) - output_path = os.path.join(outputs_dir, output) - with open(output_path, 'w') as f: - f.write(yaml.dump(val)) - env_dir = os.path.join(working_dir, 'environment') - makedirs(env_dir) - env = stack.environment() - env_path = os.path.join( - env_dir, - constants.STACK_ENV_FILE_NAME.format(stack.stack_name)) - with open(env_path, 'w') as f: - f.write(yaml.dump(env)) - - -def get_saved_stack_env(working_dir, stack_name): - env_path = os.path.join( - working_dir, 'environment', - constants.STACK_ENV_FILE_NAME.format(stack_name)) - if not os.path.isfile(env_path): - return None - with open(env_path) as f: - return yaml.safe_load(f.read()) - - -def get_ceph_networks(network_data_path, - public_network_name, - cluster_network_name): - """Get {public,cluster}_network{,_name} from network_data_path file - :param network_data_path: the path to a network_data.yaml file - :param str public_network_name: name of public_network, e.g. storage - :param str cluster_network_name: name of cluster_network, e.g. storage_mgmt - :return: dict mapping two network names and two CIDRs for cluster + public - with ms_bind_ipv4 and ms_bind_ipv6 booleans set. - - The network_data_path is searched for networks with name_lower values of - storage and storage_mgmt by default. If none found, then search repeats - but with service_net_map_replace in place of name_lower. The params - public_network_name or cluster_network_name override name of the searched - for network from storage or storage_mgmt so a customized name may be used. - The public_network and cluster_network (without '_name') are the subnets - for each network, e.g. 192.168.24.0/24, as mapped by the ip_subnet key. - If the found network has >1 subnet, all ip_subnets are combined. - """ - # default to ctlplane if nothing found in network_data - storage_net_map = {} - storage_net_map['public_network_name'] = constants.CTLPLANE_NET_NAME - storage_net_map['cluster_network_name'] = constants.CTLPLANE_NET_NAME - storage_net_map['public_network'] = constants.CTLPLANE_CIDR_DEFAULT - storage_net_map['cluster_network'] = constants.CTLPLANE_CIDR_DEFAULT - storage_net_map['ms_bind_ipv4'] = True - storage_net_map['ms_bind_ipv6'] = False - # this dict makes it easier to search for each network type in a loop - net_type = {} - net_type['public_network_name'] = public_network_name - net_type['cluster_network_name'] = cluster_network_name - - def _get_subnet(net, ip_subnet): - # Return the subnet, e.g. '192.168.24.0/24', as a string - # The net dict can either have a ip_subnet as a root element - # or a dict where multiple subnets are specified. 
If we have - # a subnets dict, then parse it looking for the ip_subnet key - if ip_subnet in net: - return net[ip_subnet] - if 'subnets' in net: - ip_subnets = list(map(lambda x: x.get(ip_subnet, ''), - net['subnets'].values())) - return ','.join(ip_subnets) - - with open(network_data_path, 'r') as stream: - try: - net_data = yaml.safe_load(stream) - except yaml.YAMLError as exc: - raise RuntimeError( - "yaml.safe_load(%s) returned '%s'" % (network_data_path, exc)) - - # 'name_lower' is not mandatory in net_data so give it the standard default - [net.setdefault('name_lower', net['name'].lower()) for net in net_data] - - for net in net_data: - if net.get('ipv6', False): - ip_subnet = 'ipv6_subnet' - else: - ip_subnet = 'ip_subnet' - for net_name, net_value in net_type.items(): - for search_tag in ['name_lower', 'service_net_map_replace']: - if net.get(search_tag, None) == net_value: - # if service_net_map_replace matched, still want name_lower - storage_net_map[net_name] = net['name_lower'] - subnet = _get_subnet(net, ip_subnet) - if not subnet: - error = ("While searching %s, %s matched %s " - "but that network did not have a %s " - "value set. To use an ipv6_subnet add " - "key 'ipv6: true' to %s in %s." - % (network_data_path, search_tag, - net_value, ip_subnet, net_value, - network_data_path)) - raise RuntimeError(error) - else: - subnet_key = net_name.replace('_name', '') - storage_net_map[subnet_key] = subnet - if ip_subnet == 'ipv6_subnet': - # If _any_ storage network has v6, then - # disable v4 and enable v6 ceph binding. - # public_network v4 and cluster_network v6 - # is not supported. - storage_net_map['ms_bind_ipv4'] = False - storage_net_map['ms_bind_ipv6'] = True - - return storage_net_map - - -def write_ephemeral_heat_clouds_yaml(heat_dir): - clouds_yaml_path = os.path.join(heat_dir, 'clouds.yaml') - clouds_dict = {} - clouds_dict['heat'] = {} - clouds_dict['heat']['auth_type'] = "none" - clouds_dict['heat']['endpoint'] = \ - "http://127.0.0.1:8006/v1/admin" - heat_yaml = dict(clouds=clouds_dict) - with open(clouds_yaml_path, 'w') as f: - f.write(yaml.dump(heat_yaml)) - - heatrc = textwrap.dedent(""" - # Clear any old environment that may conflict. - for key in $( set | awk -F= '/^OS_/ {print $1}' ); do - unset "${key}" - done - export OS_CLOUD=heat - # Add OS_CLOUDNAME to PS1 - if [ -z "${CLOUDPROMPT_ENABLED:-}" ]; then - export PS1=${PS1:-""} - export PS1=${OS_CLOUD:+"($OS_CLOUD)"} $PS1 - export CLOUDPROMPT_ENABLED=1 - fi - """) - - # Also write a heatrc file - heatrc_path = os.path.join(heat_dir, 'heatrc') - with open(heatrc_path, 'w') as f: - f.write(heatrc) - - -def get_host_groups_from_ceph_spec(ceph_spec_path, prefix='', - key='hostname', get_non_admin=True): - """Get hosts per group based on labels in ceph_spec_path file - :param ceph_spec_path: the path to a ceph_spec.yaml file - :param (prefix) append a prefix of the group, e.g. 
'ceph_' - :param (key) can be set to 'addr' to return the IP, defaults to 'hostname' - :param (get_non_admin) get hosts without the _admin label, defaults to True - :return: dict mapping each label to a hosts list - """ - hosts = {} - if get_non_admin: - non_admin_key = prefix + 'non_admin' - hosts[non_admin_key] = [] - - with open(ceph_spec_path, 'r') as stream: - try: - for spec in yaml.safe_load_all(stream): - if spec.get('service_type', None) == 'host' and \ - 'labels' in spec.keys(): - for label in spec['labels']: - group_key = prefix + label - if group_key not in hosts.keys(): - hosts[group_key] = [] - hosts[group_key].append(spec[key]) - if get_non_admin and \ - '_admin' not in spec['labels']: - hosts[non_admin_key].append(spec[key]) - except yaml.YAMLError as exc: - raise RuntimeError( - "yaml.safe_load_all(%s) returned '%s'" % (ceph_spec_path, exc)) - - return hosts - - -def standalone_ceph_inventory(working_dir): - """return an ansible inventory for a deployed standalone ceph - :param working_dir: directory where inventory should be written - :return string: the path to the inventory - """ - host = get_hostname() - inv = \ - {'Standalone': - {'hosts': {host: {}, - 'undercloud': {}}, - 'vars': {'ansible_connection': 'local', - 'ansible_host': host, - 'ansible_python_interpreter': sys.executable}}, - 'allovercloud': - {'children': {'Standalone': {}}}} - - path = os.path.join(working_dir, - constants.TRIPLEO_STATIC_INVENTORY) - with open(path, 'w') as f: - f.write(yaml.safe_dump(inv)) - return path - - -def process_ceph_daemons(daemon_path): - """Load the ceph daemons' related extra_vars and return the associated dict - :param daemon_path: the path where the daemon definition is stored - :return: dict mapping each daemon option to a value passed to ansible - """ - extra_vars = dict() - with open(daemon_path, 'r') as f: - ceph_daemons = yaml.safe_load(f.read()) - try: - for daemon in ceph_daemons.keys(): - extra_vars['tripleo_cephadm_daemon_' + daemon] = True - # process current daemon parameters/options - for k, v in ceph_daemons.get(daemon).items(): - extra_vars[k] = v - except AttributeError: - return extra_vars - return extra_vars - - -def check_deploy_backups( - working_dir, - backup_usage_percent=constants.DEPLOY_BACKUPS_USAGE_PERCENT, - disk_usage_percent=constants.DISK_USAGE_PERCENT): - """Check the total space used by all deploy backups in the given - working_dir. If it exceeds the backup_usage_percent or total disk usage - exceeds disk_usage_percent, then print a warning. - """ - backup_files = glob.iglob( - os.path.join(working_dir, '..', '*', '*.tar.bzip2')) - backup_table = prettytable.PrettyTable( - ['Backup file', 'File size (KB)']) - - total_size = 0 - backup_file = None - - for backup_file in backup_files: - file_size = os.stat(backup_file).st_size - total_size += file_size - backup_table.add_row( - [os.path.realpath(backup_file), round(file_size / 1024, 2)]) - - if backup_file: - statvfs = os.statvfs(backup_file) - fs_size = statvfs.f_frsize * statvfs.f_blocks - fs_free = statvfs.f_frsize * statvfs.f_bfree - fs_usage = 1 - (fs_free / fs_size) - backup_usage = total_size / fs_size - - if (backup_usage > backup_usage_percent / 100): - LOG.warning( - "Deploy backup files disk usage {:.2%} exceeds {:d}% " - "of disk size. Consider deleting some " - "older deploy backups.".format(backup_usage, backup_usage_percent)) - print(backup_table, file=sys.stdout) - elif (fs_usage > disk_usage_percent / 100): - LOG.warning( - "Disk usage {:.2%} exceeds {:d}% " - "of disk size. 
Consider deleting some " - "older deploy backups.".format(fs_usage, disk_usage_percent)) - print(backup_table, file=sys.stdout) - - -def get_tripleo_cephadm_keys(username, key, pools): - """Get a tripleo_cephadm_keys structure to be passed to - the tripleo-ansible role tripleo_cephadm. Assumes only - one key will be created to write to all pools. - :param username: string, e.g. 'openstack' - :param key: string for cephx secret key, e.g. 'AQC+...w==' - :param pools: list of pool names, e.g. ['vms', 'images'] - :return a list containing a single dictionary - """ - return [dict( - name='client.' + username, - key=key, - mode='0600', - caps=dict( - mgr='allow *', - mon='profile rbd', - osd=', '.join(list( - map(lambda x: 'profile rbd pool=' + x, pools)))))] - - -def duplicate_param_check(user_environments): - """Register warnings when duplcate parameters are discovered. - - :param user_environments: List of user defined environment files. - :type user_environments: Array - """ - used_params = collections.defaultdict(int) - duplicate_params = dict() - for env_file in user_environments: - _env_file_parsed = url_parse.urlparse(env_file) - try: - with open(_env_file_parsed.path, 'r') as f: - _env_map = yaml.safe_load(f) - except FileNotFoundError: - continue - else: - LOG.debug('Inspecting "%s"', _env_file_parsed.path) - - for k, v in _env_map.get('parameter_defaults', {}).items(): - used_params[k] += 1 - if used_params[k] > 1: - duplicate_params[k] = v - - for k, v in duplicate_params.items(): - LOG.warning( - 'Duplicate parameter defined. Key: "%s", Current Value: %s', k, - yaml.dump(v, default_flow_style=False) - ) - - -def get_output_dir(output_dir: str, stack_name: str = "undercloud") -> str: - if not output_dir: - return os.path.join(constants.UNDERCLOUD_OUTPUT_DIR, - 'tripleo-deploy', stack_name) - return output_dir - - -def rotate_ansible_log(ansible_log_abspath): - now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') - new_ansible_log_abspath = os.path.join(ansible_log_abspath+"-"+now) - os.rename(ansible_log_abspath, new_ansible_log_abspath) diff --git a/tripleoclient/v1/__init__.py b/tripleoclient/v1/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/v1/container_image.py b/tripleoclient/v1/container_image.py deleted file mode 100644 index abdbf43d2..000000000 --- a/tripleoclient/v1/container_image.py +++ /dev/null @@ -1,598 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import copy -import datetime -import errno -from io import StringIO -import logging -import os -import shutil - -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from urllib import parse -import yaml - -from tripleo_common.image import image_uploader -from tripleo_common.image import kolla_builder -from tripleo_common.utils.locks import processlock -from tripleoclient import utils as oooutils - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import utils - - -def build_env_file(params, command_options): - - f = StringIO() - f.write('# Generated with the following on %s\n#\n' % - datetime.datetime.now().isoformat()) - f.write('# openstack %s\n#\n\n' % - ' '.join(command_options)) - - yaml.safe_dump({'parameter_defaults': params}, f, - default_flow_style=False) - return f.getvalue() - - -class TripleOContainerImagePush(command.Command): - """Push specified image to registry.""" - - auth_required = False - log = logging.getLogger(__name__ + ".TripleoContainerImagePush") - - def get_parser(self, prog_name): - parser = super(TripleOContainerImagePush, self).get_parser(prog_name) - parser.add_argument( - "--local", - dest="local", - default=False, - action="store_true", - help=_("Use this flag if the container image is already on the " - "current system and does not need to be pulled from a " - "remote registry.") - ) - parser.add_argument( - "--registry-url", - dest="registry_url", - metavar='', - default=None, - help=_("URL of the destination registry in the form " - ":.") - ) - parser.add_argument( - "--append-tag", - dest="append_tag", - default='', - help=_("Tag to append to the existing tag when pushing the " - "container. ") - ) - parser.add_argument( - "--username", - dest="username", - metavar='', - help=_("Username for the destination image registry.") - ) - parser.add_argument( - "--password", - dest="password", - metavar='', - help=_("Password for the destination image registry.") - ) - parser.add_argument( - "--source-username", - dest="source_username", - metavar='', - help=_("Username for the source image registry.") - ) - parser.add_argument( - "--source-password", - dest="source_password", - metavar='', - help=_("Password for the source image registry.") - ) - - parser.add_argument( - "--dry-run", - dest="dry_run", - action="store_true", - help=_("Perform a dry run upload. The upload action is not " - "performed, but the authentication process is attempted.") - ) - parser.add_argument( - "--multi-arch", - dest="multi_arch", - action="store_true", - help=_("Enable multi arch support for the upload.") - ) - parser.add_argument( - "--cleanup", - dest="cleanup", - action="store_true", - default=False, - help=_("Remove local copy of the image after uploading") - ) - parser.add_argument( - dest="image_to_push", - metavar='', - help=_("Container image to upload. Should be in the form of " - "//:. 
If tag is " - "not provided, then latest will be used.") - ) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - lock = processlock.ProcessLock() - manager = image_uploader.ImageUploadManager(lock=lock) - uploader = manager.uploader('python') - - source_image = parsed_args.image_to_push - - if parsed_args.local or source_image.startswith('containers-storage:'): - storage = 'containers-storage:' - if not source_image.startswith(storage): - source_image = storage + source_image.replace('docker://', '') - elif not parsed_args.local: - self.log.warning('Assuming local container based on provided ' - 'container path. (e.g. starts with ' - 'containers-storage:)') - source_url = parse.urlparse(source_image) - image_name = source_url.geturl() - image_source = None - if parsed_args.source_username or parsed_args.source_password: - self.log.warning('Source credentials ignored for local images') - else: - storage = 'docker://' - if not source_image.startswith(storage): - source_image = storage + source_image - source_url = parse.urlparse(source_image) - image_source = source_url.netloc - image_name = source_url.path[1:] - if len(image_name.split('/')) != 2: - raise exceptions.DownloadError('Invalid container. Provided ' - 'container image should be ' - '//:' - '') - if parsed_args.source_username or parsed_args.source_password: - if not parsed_args.source_username: - self.log.warning('Skipping authentication - missing source' - ' username') - elif not parsed_args.source_password: - self.log.warning('Skipping authentication - missing source' - ' password') - else: - uploader.authenticate(source_url, - parsed_args.source_username, - parsed_args.source_password) - - registry_url_arg = parsed_args.registry_url - if registry_url_arg is None: - registry_url_arg = image_uploader.get_undercloud_registry() - if not registry_url_arg.startswith('docker://'): - registry_url = 'docker://%s' % registry_url_arg - else: - registry_url = registry_url_arg - reg_url = parse.urlparse(registry_url) - - session = uploader.authenticate(reg_url, - parsed_args.username, - parsed_args.password) - try: - if not parsed_args.dry_run: - task = image_uploader.UploadTask( - image_name=image_name, - pull_source=image_source, - push_destination=registry_url_arg, - append_tag=parsed_args.append_tag, - modify_role=None, - modify_vars=None, - cleanup=parsed_args.cleanup, - multi_arch=parsed_args.multi_arch) - - uploader.add_upload_task(task) - uploader.run_tasks() - except OSError as e: - if e.errno == errno.EACCES: - self.log.error("Unable to upload due to permissions. 
" - "Please prefix command with sudo.") - raise oscexc.CommandError(e) - finally: - session.close() - - -class TripleOContainerImageDelete(command.Command): - """Delete specified image from registry.""" - - auth_required = False - log = logging.getLogger(__name__ + ".TripleoContainerImageDelete") - - def get_parser(self, prog_name): - parser = super(TripleOContainerImageDelete, self).get_parser(prog_name) - parser.add_argument( - "--registry-url", - dest="registry_url", - metavar='', - default=None, - help=_("URL of registry images are to be listed from in the " - "form :.") - ) - parser.add_argument( - dest="image_to_delete", - metavar='', - help=_("Full URL of image to be deleted in the " - "form :/path/to/image") - ) - parser.add_argument( - "--username", - dest="username", - metavar='', - help=_("Username for image registry.") - ) - parser.add_argument( - "--password", - dest="password", - metavar='', - help=_("Password for image registry.") - ) - parser.add_argument( - '-y', '--yes', - help=_('Skip yes/no prompt (assume yes).'), - default=False, - action="store_true") - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if not parsed_args.yes: - confirm = utils.prompt_user_for_confirmation( - message=_("Are you sure you want to delete this image " - "[y/N]? "), - logger=self.log) - if not confirm: - raise oscexc.CommandError("Action not confirmed, exiting.") - - lock = processlock.ProcessLock() - manager = image_uploader.ImageUploadManager(lock=lock) - uploader = manager.uploader('python') - registry_url_arg = parsed_args.registry_url - if registry_url_arg is None: - registry_url_arg = image_uploader.get_undercloud_registry() - url = uploader._image_to_url(registry_url_arg) - session = uploader.authenticate(url, parsed_args.username, - parsed_args.password) - - try: - uploader.delete(parsed_args.image_to_delete, session=session) - except OSError as e: - if e.errno == errno.EACCES: - self.log.error("Unable to remove due to permissions. 
" - "Please prefix command with sudo.") - raise oscexc.CommandError(e) - finally: - session.close() - - -class TripleOContainerImageList(command.Lister): - """List images discovered in registry.""" - - auth_required = False - log = logging.getLogger(__name__ + ".TripleoContainerImageList") - - def get_parser(self, prog_name): - parser = super(TripleOContainerImageList, self).get_parser(prog_name) - parser.add_argument( - "--registry-url", - dest="registry_url", - metavar='', - default=None, - help=_("URL of registry images are to be listed from in the " - "form :.") - ) - parser.add_argument( - "--username", - dest="username", - metavar='', - help=_("Username for image registry.") - ) - parser.add_argument( - "--password", - dest="password", - metavar='', - help=_("Password for image registry.") - ) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - lock = processlock.ProcessLock() - manager = image_uploader.ImageUploadManager(lock=lock) - uploader = manager.uploader('python') - registry_url_arg = parsed_args.registry_url - if registry_url_arg is None: - registry_url_arg = image_uploader.get_undercloud_registry() - url = uploader._image_to_url(registry_url_arg) - session = uploader.authenticate(url, parsed_args.username, - parsed_args.password) - try: - results = uploader.list(url.geturl(), session=session) - finally: - session.close() - - cliff_results = [] - for r in results: - cliff_results.append((r,)) - return (("Image Name",), cliff_results) - - -class TripleOContainerImageShow(command.ShowOne): - """Show image selected from the registry.""" - - auth_required = False - log = logging.getLogger(__name__ + ".TripleoContainerImageShow") - - @property - def formatter_default(self): - return 'json' - - def get_parser(self, prog_name): - parser = super(TripleOContainerImageShow, self).get_parser(prog_name) - parser.add_argument( - "--username", - dest="username", - metavar='', - help=_("Username for image registry.") - ) - parser.add_argument( - "--password", - dest="password", - metavar='', - help=_("Password for image registry.") - ) - parser.add_argument( - dest="image_to_inspect", - metavar='', - help=_( - "Image to be inspected, for example: " - "docker.io/library/centos:7 or " - "docker://docker.io/library/centos:7") - ) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - lock = processlock.ProcessLock() - manager = image_uploader.ImageUploadManager(lock=lock) - uploader = manager.uploader('python') - url = uploader._image_to_url(parsed_args.image_to_inspect) - session = uploader.authenticate(url, parsed_args.username, - parsed_args.password) - try: - image_inspect_result = uploader.inspect( - parsed_args.image_to_inspect, - session=session) - finally: - session.close() - - return self.format_image_inspect(image_inspect_result) - - def format_image_inspect(self, image_inspect_result): - column_names = ['Name'] - data = [image_inspect_result.pop('Name')] - - result_fields = list(image_inspect_result.keys()) - result_fields.sort() - for field in result_fields: - column_names.append(field) - data.append(image_inspect_result[field]) - - return column_names, data - - -class TripleOImagePrepareDefault(command.Command): - """Generate a default ContainerImagePrepare parameter.""" - - auth_required = False - log = logging.getLogger(__name__ + ".TripleoImagePrepare") - - def get_parser(self, prog_name): - parser = super(TripleOImagePrepareDefault, self).get_parser(prog_name) - 
parser.add_argument( - "--output-env-file", - dest="output_env_file", - metavar='', - help=_("File to write environment file containing default " - "ContainerImagePrepare value."), - ) - parser.add_argument( - '--local-push-destination', - dest='push_destination', - action='store_true', - default=False, - help=_('Include a push_destination to trigger upload to a local ' - 'registry.') - ) - parser.add_argument( - '--enable-registry-login', - dest='registry_login', - action='store_true', - default=False, - help=_('Use this flag to have systems attempt ' - 'to log in to a remote registry prior to pulling their ' - 'containers. This flag should be used when ' - '--local-push-destination is *NOT* used and the target ' - 'systems will have network connectivity to the remote ' - 'registries. Do not use this for an overcloud that ' - 'may not have network connectivity to a remote registry.') - ) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - cip = copy.deepcopy(kolla_builder.CONTAINER_IMAGE_PREPARE_PARAM) - if parsed_args.push_destination: - for entry in cip: - entry['push_destination'] = True - params = { - 'ContainerImagePrepare': cip - } - if parsed_args.registry_login: - if parsed_args.push_destination: - self.log.warning('[WARNING] --local-push-destination was used ' - 'with --enable-registry-login. Please make ' - 'sure you understand the use of these ' - 'parameters together as they can cause ' - 'deployment failures.') - self.log.warning('[NOTE] Make sure to update the parameter_defaults' - ' with ContainerImageRegistryCredentials for the ' - 'registries requiring authentication.') - params['ContainerImageRegistryLogin'] = True - - env_data = build_env_file(params, self.app.command_options) - self.app.stdout.write(env_data) - if parsed_args.output_env_file: - if os.path.exists(parsed_args.output_env_file): - self.log.warning("Output env file exists, " - "moving it to backup.") - shutil.move(parsed_args.output_env_file, - parsed_args.output_env_file + ".backup") - utils.safe_write(parsed_args.output_env_file, env_data) - - -class TripleOImagePrepare(command.Command): - """Prepare and upload containers from a single command.""" - - auth_required = False - log = logging.getLogger(__name__ + ".TripleoImagePrepare") - - def get_parser(self, prog_name): - parser = super(TripleOImagePrepare, self).get_parser(prog_name) - try: - roles_file = utils.rel_or_abs_path( - constants.OVERCLOUD_ROLES_FILE, - constants.TRIPLEO_HEAT_TEMPLATES) - except exceptions.DeploymentError: - roles_file = None - parser.add_argument( - '--environment-file', '-e', metavar='', - action='append', dest='environment_files', - help=_('Environment file containing the ContainerImagePrepare ' - 'parameter which specifies all prepare actions. ' - 'Also, environment files specifying which services are ' - 'containerized. Entries will be filtered to only contain ' - 'images used by containerized services. (Can be specified ' - 'more than once.)') - ) - parser.add_argument( - '--environment-directory', metavar='', - action='append', dest='environment_directories', - default=[os.path.expanduser(constants.DEFAULT_ENV_DIRECTORY)], - help=_('Environment file directories that are automatically ' - 'added to the environment. ' - 'Can be specified more than once.
Files in directories are ' - 'loaded in ascending sort order.') - ) - parser.add_argument( - '--roles-file', '-r', dest='roles_file', - default=roles_file, - help=_( - 'Roles file, overrides the default %s in the t-h-t templates ' - 'directory used for deployment. May be an ' - 'absolute path or the path relative to the templates dir.' - ) % constants.OVERCLOUD_ROLES_FILE - ) - parser.add_argument( - "--output-env-file", - dest="output_env_file", - metavar='', - help=_("File to write heat environment file which specifies all " - "image parameters. Any existing file will be overwritten."), - ) - parser.add_argument( - '--dry-run', - dest='dry_run', - action='store_true', - default=False, - help=_('Do not perform any pull, modify, or push operations. ' - 'The environment file will still be populated as if these ' - 'operations were performed.') - ) - parser.add_argument( - "--cleanup", - dest="cleanup", - metavar='', - default=image_uploader.CLEANUP_FULL, - help=_("Cleanup behavior for local images left after upload. " - "The default 'full' will attempt to delete all local " - "images. 'partial' will leave images required for " - "deployment on this host. 'none' will do no cleanup.") - ) - parser.add_argument( - "--log-file", - dest="log_file", - default=constants.CONTAINER_IMAGE_PREPARE_LOG_FILE, - help=_("Log file to be used for python logging. " - "By default it would be logged to " - "$HOME/container_image_prepare.log.") - ) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if parsed_args.cleanup not in image_uploader.CLEANUP: - raise oscexc.CommandError('--cleanup must be one of: %s' % - ', '.join(image_uploader.CLEANUP)) - - role_file = None - if parsed_args.roles_file: - role_file = utils.rel_or_abs_path(parsed_args.roles_file, - constants.TRIPLEO_HEAT_TEMPLATES) - env_dirs = [os.path.abspath(x) - for x in parsed_args.environment_directories] - env_files = [os.path.abspath(x) - for x in (parsed_args.environment_files or [])] - - extra_vars = { - "roles_file": role_file, - "environment_directories": env_dirs, - "environment_files": env_files, - "cleanup": parsed_args.cleanup, - "dry_run": parsed_args.dry_run, - "log_file": parsed_args.log_file} - - if self.app_args.verbose_level >= 3: - extra_vars["debug"] = True - - if parsed_args.output_env_file: - extra_vars["output_env_file"] = os.path.abspath( - parsed_args.output_env_file) - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-container-image-prepare.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars) diff --git a/tripleoclient/v1/overcloud_admin.py b/tripleoclient/v1/overcloud_admin.py deleted file mode 100644 index 96bdeb2d8..000000000 --- a/tripleoclient/v1/overcloud_admin.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
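
Reviewer note on the prepare-default command removed above: it deep-copies kolla_builder.CONTAINER_IMAGE_PREPARE_PARAM, optionally marks each entry with push_destination, and serialises the result through build_env_file(). A condensed sketch of that flow; the ContainerImagePrepare entry below is a stand-in, not the real default shipped by tripleo-common:

    # Condensed sketch of `openstack tripleo container image prepare default
    # --local-push-destination`; the prepare entry is a stand-in value.
    import copy
    import datetime
    from io import StringIO

    import yaml

    CONTAINER_IMAGE_PREPARE_PARAM = [
        {'set': {'namespace': 'quay.io/tripleomastercentos9',  # stand-in
                 'tag': 'current-tripleo'}},                   # stand-in
    ]

    cip = copy.deepcopy(CONTAINER_IMAGE_PREPARE_PARAM)
    for entry in cip:
        entry['push_destination'] = True  # effect of --local-push-destination

    f = StringIO()
    # (the real build_env_file() also echoes the invoking 'openstack ...'
    # command line in a second comment header)
    f.write('# Generated with the following on %s\n#\n\n' %
            datetime.datetime.now().isoformat())
    yaml.safe_dump({'parameter_defaults': {'ContainerImagePrepare': cip}},
                   f, default_flow_style=False)
    print(f.getvalue())
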
-# - -import os -from oslo_config import cfg -from oslo_log import log as logging - -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils -from tripleoclient.constants import ANSIBLE_TRIPLEO_PLAYBOOKS - -CONF = cfg.CONF - - -class Authorize(command.Command): - "Deploy the ssh keys needed by Mistral." - - log = logging.getLogger(__name__ + ".AdminAuthorize") - - def get_parser(self, prog_name): - parser = super(Authorize, self).get_parser(prog_name) - - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - - parser.add_argument( - '--overcloud-ssh-user', - default='tripleo-admin', - help=_('User for ssh access to overcloud nodes') - ) - parser.add_argument( - '--overcloud-ssh-key', - default=None, - help=_('Key path for ssh access to overcloud nodes. When ' - 'undefined the key will be autodetected.') - ) - parser.add_argument( - '--overcloud-ssh-network', - help=_('DEPRECATED: Network name to use for ssh access to ' - 'overcloud nodes. This has no effect now.'), - default='ctlplane' - ) - parser.add_argument( - '--overcloud-ssh-enable-timeout', - help=_('This option no longer has any effect.'), - type=int, - default=constants.ENABLE_SSH_ADMIN_TIMEOUT - ) - parser.add_argument( - '--overcloud-ssh-port-timeout', - help=_('Timeout for the ssh port to become active.'), - type=int, - default=constants.ENABLE_SSH_ADMIN_SSH_PORT_TIMEOUT - ) - parser.add_argument( - '--static-inventory', - dest='static_inventory', - action='store', - default=None, - help=_('Path to an existing ansible inventory to ' - 'use.
If not specified, one will be ' - 'generated in ' - '~/tripleo-ansible-inventory.yaml') - ) - parser.add_argument( - '--limit', - dest='limit_hosts', - action='store', - default='all', - help=_('Define which hosts or group of hosts to ' - 'run the Admin Authorize tasks against.') - ) - - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action({})".format(parsed_args)) - ansible_dir = os.path.join(oooutils.get_default_working_dir( - parsed_args.stack - ), - 'config-download', - parsed_args.stack) - - if parsed_args.overcloud_ssh_network: - self.log.warning('The --overcloud-ssh-network option is ' - 'deprecated and has no effect now.') - - if not parsed_args.static_inventory: - inventory = os.path.join(ansible_dir, - 'tripleo-ansible-inventory.yaml') - else: - inventory = parsed_args.static_inventory - - key_file = oooutils.get_key(parsed_args.stack) - - if not parsed_args.limit_hosts: - limit_hosts = parsed_args.stack - else: - limit_hosts = parsed_args.limit_hosts - - all_hosts = oooutils.parse_ansible_inventory(inventory) - - oooutils.run_ansible_playbook( - playbook='cli-enable-ssh-admin.yaml', - inventory=inventory, - workdir=ansible_dir, - key=parsed_args.overcloud_ssh_key, - playbook_dir=ANSIBLE_TRIPLEO_PLAYBOOKS, - ssh_user=parsed_args.overcloud_ssh_user, - extra_vars={ - "ANSIBLE_PRIVATE_KEY_FILE": key_file, - "ssh_servers": all_hosts - }, - limit_hosts='localhost,{}'.format(limit_hosts), - ansible_timeout=parsed_args.overcloud_ssh_port_timeout - ) diff --git a/tripleoclient/v1/overcloud_backup.py b/tripleoclient/v1/overcloud_backup.py deleted file mode 100644 index 32db6fb26..000000000 --- a/tripleoclient/v1/overcloud_backup.py +++ /dev/null @@ -1,406 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import logging -import os -import yaml - -from osc_lib.command import command -from osc_lib.i18n import _ - -from tripleoclient import constants -from tripleoclient import utils - -INVENTORY = constants.ANSIBLE_INVENTORY.format('overcloud') - - -class BackupOvercloud(command.Command): - """Backup the Overcloud""" - - log = logging.getLogger(__name__ + ".BackupOvercloud") - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - - parser.add_argument( - '--init', - const='rear', - nargs='?', - action='store', - help=_("Initialize environment for backup, " - "using 'rear', 'nfs' or 'ironic' as args " - "which will check for package install " - "and configured ReaR or NFS server. " - "Defaults to: rear. " - "i.e. --init rear. 
" - "WARNING: This flag will be deprecated " - "and replaced by '--setup-rear' ," - "'--setup-nfs' and '--setup-ironic'.") - ) - - parser.add_argument( - '--setup-nfs', - default=False, - action='store_true', - help=_("Setup the NFS server on the backup node " - "which will install required packages " - "and configuration on the host 'BackupNode' " - "in the ansible inventory.") - ) - - parser.add_argument( - '--setup-rear', - default=False, - action='store_true', - help=_("Setup ReaR on the overcloud 'Controller' hosts which will " - "install and configure ReaR.") - ) - - parser.add_argument( - '--setup-ironic', - default=False, - action='store_true', - help=_("Setup ReaR on the overcloud 'Controller' hosts which will " - "install and configure ReaR with ironic") - ) - - parser.add_argument( - '--cron', - default=False, - action='store_true', - help=_("Sets up a new cron job that by default will " - "execute a weekly backup at Sundays midnight, " - "but that can be customized by using the " - "tripleo_backup_and_restore_cron extra-var.") - ) - - parser.add_argument( - '--inventory', - default=INVENTORY, - help=_("Tripleo inventory file generated with " - "tripleo-ansible-inventory command. " - "Defaults to: " + INVENTORY) - ) - - parser.add_argument( - '--storage-ip', - help=_("Storage IP is an optional parameter " - "which allows for an ip of a storage " - "server to be specified, overriding the " - "default undercloud. " - "WARNING: This flag will be deprecated in " - "favor of '--extra-vars' which will allow " - "to pass this and other variables.") - ) - - parser.add_argument( - '--extra-vars', - default=None, - action='store', - help=_("Set additional variables as Dict or as " - "an absolute path of a JSON or YAML file type. " - "i.e. --extra-vars '{\"key\": \"val\", " - " \"key2\": \"val2\"}' " - "i.e. --extra-vars /path/to/my_vars.yaml " - "i.e. --extra-vars /path/to/my_vars.json. 
" - "For more information about the variables that " - "can be passed, visit: https://opendev.org/openstack/" - "tripleo-ansible/src/branch/master/tripleo_ansible/" - "roles/backup_and_restore/defaults/main.yml.") - ) - - return parser - - def _parse_extra_vars(self, raw_extra_vars): - - if raw_extra_vars is None: - extra_vars = {} - elif os.path.exists(raw_extra_vars): - with open(raw_extra_vars, 'r') as fp: - extra_vars = yaml.safe_load(fp.read()) - else: - try: - extra_vars = yaml.safe_load(raw_extra_vars) - except yaml.YAMLError as exc: - raise RuntimeError( - _('--extra-vars is not an existing file and cannot be ' - 'parsed as YAML / JSON: %s') % exc) - - return extra_vars - - def _run_backup_overcloud(self, parsed_args): - """Backup defined overcloud nodes.""" - - extra_vars = self._parse_extra_vars(parsed_args.extra_vars) - - if parsed_args.storage_ip: - storage_ip = parsed_args.storage_ip - - extra_vars[ - 'tripleo_backup_and_restore_nfs_server' - ] = storage_ip - - if not (os.path.isfile(parsed_args.inventory) and - os.access(parsed_args.inventory, os.R_OK)): - raise RuntimeError( - _('The inventory file {} does not exist or is not ' - 'readable'.format(parsed_args.inventory))) - - if parsed_args.setup_nfs is True or parsed_args.init == 'nfs': - - self.log.debug(_('Setting up NFS Backup node')) - self._run_ansible_playbook( - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - extra_vars=extra_vars - ) - - if parsed_args.setup_rear is True or parsed_args.init == 'rear': - - self.log.debug(_('Installing ReaR on controller nodes')) - self._run_ansible_playbook( - playbook='prepare-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - extra_vars=extra_vars - ) - - if parsed_args.setup_ironic is True or parsed_args.init == 'ironic': - - self.log.debug(_('Installing Rear/Ironic on nodes')) - self._run_ansible_playbook( - playbook='cli-overcloud-conf-ironic.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - extra_vars=extra_vars - ) - - if parsed_args.cron is True: - - self.log.debug(_('Programming cron backup')) - self._run_ansible_playbook( - playbook='cli-overcloud-backup-cron.yaml', - inventory=parsed_args.inventory, - tags=None, - skip_tags=None, - extra_vars=extra_vars - ) - - if (parsed_args.setup_nfs is False and - parsed_args.setup_rear is False and - parsed_args.setup_ironic is False and - parsed_args.cron is False and - parsed_args.init is None): - - self.log.warning( - '\n' - ' ########################################################\n' - ' # Deprecation note #\n' - ' # Backup and restore feature is deprecated and will be #\n' - ' # removed in the next release. Please consider using #\n' - ' # snapshot and revert feature. 
#\n' - ' ########################################################\n') - self.log.debug(_('Starting Overcloud Backup')) - self._run_ansible_playbook( - playbook='cli-overcloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_create_recover_image', - skip_tags=None, - extra_vars=extra_vars - ) - - def _run_ansible_playbook(self, - playbook, - inventory, - tags, - skip_tags, - extra_vars): - """Run ansible playbook""" - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook=playbook, - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - tags=tags, - skip_tags=skip_tags, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars - ) - - def take_action(self, parsed_args): - - if parsed_args.init: - - self.log.warning("The following flags will be deprecated: " - "[--init, --storage-ip]") - - self._run_backup_overcloud(parsed_args) - print( - '\n' - ' #############################################################\n' - ' # Disclaimer #\n' - ' # Backup verification is the End Users responsibility #\n' - ' # Please verify backup integrity before any possible #\n' - ' # disruptive actions against the Overcloud. The resulting #\n' - ' # backup file path will be shown on a successful execution. #\n' - ' # #\n' - ' # .-Stay safe and avoid future issues-. #\n' - ' #############################################################\n' - ) - - -class BackupSnapshot(command.Command): - """Takes an LVM snapshot, ignoring all the rest - of the parameters passed. To be able to take - a snapshot, the following conditions must - be met: - - The disk must be configured to use LVM - - There must be an lv called lv_snapshot - - lv_snapshot must be 8GB or more - This operation will destroy the lv_snapshot volume - and replace it with snapshots of the disks. - """ - - log = logging.getLogger(__name__ + ".BackupSnapshotOvercloud") - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - - parser.add_argument( - '--inventory', - default=INVENTORY, - help=_("Tripleo inventory file generated with " - "tripleo-ansible-inventory command. " - "Defaults to: " + INVENTORY) - ) - - parser.add_argument( - '--remove', - default=False, - action='store_true', - help=_("Removes all the snapshot volumes " - "that were created.") - ) - - parser.add_argument( - '--revert', - default=False, - action='store_true', - help=_("Reverts all the disks to the moment " - "when the snapshot was created.") - ) - - parser.add_argument( - '--extra-vars', - default=None, - action='store', - help=_("Set additional variables as Dict or as " - "an absolute path of a JSON or YAML file type. " - "i.e. --extra-vars '{\"key\": \"val\", " - " \"key2\": \"val2\"}' " - "i.e. --extra-vars /path/to/my_vars.yaml " - "i.e. --extra-vars /path/to/my_vars.json.
" - "For more information about the variables that " - "can be passed, visit: https://opendev.org/openstack/" - "tripleo-ansible/src/branch/master/tripleo_ansible/" - "roles/backup_and_restore/defaults/main.yml.") - ) - - return parser - - def _parse_extra_vars(self, raw_extra_vars): - - if raw_extra_vars is None: - extra_vars = {} - elif os.path.exists(raw_extra_vars): - with open(raw_extra_vars, 'r') as fp: - extra_vars = yaml.safe_load(fp.read()) - else: - try: - extra_vars = yaml.safe_load(raw_extra_vars) - except yaml.YAMLError as exc: - raise RuntimeError( - _('--extra-vars is not an existing file and cannot be ' - 'parsed as YAML / JSON: %s') % exc) - - return extra_vars - - def _run_snapshot_overcloud(self, parsed_args): - """Snapshot defined overcloud nodes.""" - - extra_vars = self._parse_extra_vars(parsed_args.extra_vars) - - if not (os.path.isfile(parsed_args.inventory) and - os.access(parsed_args.inventory, os.R_OK)): - raise RuntimeError( - _('The inventory file {} does not exist or is not ' - 'readable'.format(parsed_args.inventory))) - - if parsed_args.remove is True and parsed_args.revert is True: - raise RuntimeError( - _('--revert and --remove are mutually exclusive')) - if parsed_args.remove is True and parsed_args.revert is False: - tags = 'remove_snapshots' - if parsed_args.revert is True and parsed_args.remove is False: - tags = 'revert_snapshots' - if parsed_args.remove is False and parsed_args.revert is False: - tags = 'create_snapshots' - - self.log.debug(_('Starting Overcloud Snapshot')) - self._run_ansible_playbook( - playbook='cli-overcloud-snapshot.yaml', - inventory=parsed_args.inventory, - tags=tags, - skip_tags=None, - extra_vars=extra_vars - ) - - def _run_ansible_playbook(self, - playbook, - inventory, - tags, - skip_tags, - extra_vars): - """Run ansible playbook""" - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook=playbook, - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - tags=tags, - skip_tags=skip_tags, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars - ) - - def take_action(self, parsed_args): - self._run_snapshot_overcloud(parsed_args) diff --git a/tripleoclient/v1/overcloud_bios.py b/tripleoclient/v1/overcloud_bios.py deleted file mode 100644 index 81867933f..000000000 --- a/tripleoclient/v1/overcloud_bios.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import logging -import os - -from osc_lib.i18n import _ -import yaml - -from tripleoclient import command -from tripleoclient import utils -from tripleoclient.workflows import baremetal - - -class ConfigureBIOS(command.Command): - """Apply BIOS configuration on given nodes""" - - log = logging.getLogger(__name__ + ".ConfigureBIOS") - - def get_parser(self, prog_name): - parser = super(ConfigureBIOS, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('node_uuids', - nargs="*", - metavar="", - default=[], - help=_('Baremetal Node UUIDs for the node(s) to ' - 'configure BIOS')) - group.add_argument("--all-manageable", - action='store_true', - help=_("Configure BIOS for all nodes currently in " - "'manageable' state")) - parser.add_argument('--configuration', metavar='', - dest='configuration', - help=_('BIOS configuration (YAML/JSON string or ' - 'file name).')) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action({args})".format(args=parsed_args)) - - if os.path.exists(parsed_args.configuration): - with open(parsed_args.configuration, 'r') as fp: - configuration = yaml.safe_load(fp.read()) - else: - try: - configuration = yaml.safe_load(parsed_args.configuration) - except yaml.YAMLError as exc: - raise RuntimeError( - _('Configuration is not an existing file and cannot be ' - 'parsed as YAML: %s') % exc) - - # Basic sanity check, we defer the full check to Ironic - try: - settings = configuration['settings'] - except KeyError: - raise ValueError( - _('Configuration must contain key "settings"')) - except TypeError: - raise TypeError( - _('Configuration must be an object, got %r instead') - % configuration) - - if (not isinstance(settings, list) or - not all(isinstance(item, dict) for item in settings)): - raise TypeError( - _('BIOS settings list is expected to be a list of ' - 'objects, got %r instead') % settings) - - clients = self.app.client_manager - if parsed_args.node_uuids: - baremetal.apply_bios_configuration( - node_uuids=parsed_args.node_uuids, - configuration=configuration, - verbosity=utils.playbook_verbosity(self=self) - ) - else: - baremetal.apply_bios_configuration_on_manageable_nodes( - clients, - configuration=configuration, - verbosity=utils.playbook_verbosity(self=self) - ) - - -class ResetBIOS(command.Command): - """Reset BIOS configuration to factory default""" - - log = logging.getLogger(__name__ + ".ResetBIOS") - - def get_parser(self, prog_name): - parser = super(ResetBIOS, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('node_uuids', - nargs="*", - metavar="", - default=[], - help=_('Baremetal Node UUIDs for the node(s) to ' - 'reset BIOS')) - group.add_argument("--all-manageable", - action='store_true', - help=_("Reset BIOS on all nodes currently in " - "'manageable' state")) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action({args})".format(args=parsed_args)) - - clients = self.app.client_manager - if parsed_args.node_uuids: - baremetal.reset_bios_configuration( - node_uuids=parsed_args.node_uuids, - verbosity=utils.playbook_verbosity(self=self) - ) - else: - baremetal.reset_bios_configuration_on_manageable_nodes( - clients=clients, - verbosity=utils.playbook_verbosity(self=self) - ) diff --git a/tripleoclient/v1/overcloud_cell.py b/tripleoclient/v1/overcloud_cell.py deleted file mode 100644 index b20ca63e6..000000000 --- a/tripleoclient/v1/overcloud_cell.py +++ /dev/null 
@@ -1,139 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -import logging -import os.path -import yaml - -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient import command -from tripleoclient import exceptions -from tripleoclient import export -from tripleoclient import utils as oooutils - - -class ExportCell(command.Command): - """Export cell information used as import of another cell""" - - log = logging.getLogger(__name__ + ".ExportCell") - now = datetime.now().strftime('%Y%m%d%H%M%S') - - def get_parser(self, prog_name): - parser = super(ExportCell, self).get_parser(prog_name) - parser.add_argument('--control-plane-stack', - dest='control_plane_stack', - metavar='', - help=_('Name of the environment main Heat stack ' - 'to export information from. ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('--cell-stack', '-e', metavar='', - help=_('Name of the controller cell Heat stack to ' - 'export information from. Used in case of: ' - 'control plane stack -> cell controller ' - 'stack -> multiple compute stacks')) - parser.add_argument('--output-file', '-o', metavar='', - help=_('Name of the output file for the cell data ' - 'export. It will default to ".yaml"')) - parser.add_argument('--working-dir', action='store', - help=_('The working directory for the ' - 'deployment where all input, output, and ' - 'generated files are stored. Defaults to ' - '"$HOME/overcloud-deploy/"')) - parser.add_argument('--config-download-dir', - action='store', - help=_('Directory to search for config-download ' - 'export data. Defaults to $HOME/' - 'overcloud-deploy//config-download')) - parser.add_argument('--force-overwrite', '-f', action='store_true', - default=False, - help=_('Overwrite output file if it exists.')) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - control_plane_stack = parsed_args.control_plane_stack - cell_stack = parsed_args.cell_stack - cell_name = control_plane_stack - if cell_stack: - cell_name = cell_stack - output_file = parsed_args.output_file or \ - '%s-cell-export.yaml' % cell_name - - self.log.info('Running at %s with parameters %s', - self.now, - parsed_args) - - if os.path.exists(output_file) and not parsed_args.force_overwrite: - raise exceptions.CellExportError( - "File '%s' already exists, not exporting." 
% output_file) - - stack_to_export = control_plane_stack - should_filter = True - if cell_stack: - stack_to_export = cell_stack - should_filter = False - - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir(stack_to_export) - else: - working_dir = parsed_args.working_dir - - if not parsed_args.config_download_dir: - config_download_dir = os.path.join(os.environ.get('HOME'), - "overcloud-deploy", - stack_to_export, - 'config-download') - else: - config_download_dir = parsed_args.config_download_dir - - data = export.export_passwords(working_dir, stack_to_export) - - data.update(export.export_stack( - working_dir, - stack_to_export, should_filter, - config_download_dir)) - data = dict(parameter_defaults=data) - - # write the exported data - with open(output_file, 'w') as f: - yaml.safe_dump(data, f, default_flow_style=False) - - print("Cell information exported to %s." % output_file) - - msg = """ \n\n - Next steps: - ===========\n - * Create roles file for cell stack, e.g.: - openstack overcloud roles generate --roles-path \\ - /usr/share/openstack-tripleo-heat-templates/roles \\ - -o cell_roles_data.yaml Compute CellController - * Create new flavor used to tag the cell controller - * Tag cell controller nodes into the new flavor - * Create cell parameter file as explained in the doc link below - * Deploy the cell and make sure to add the following information - to the deploy command: - - additional environment files used for overcloud stack - - --stack - - cell role file created - - the exported cell information file {output_file} - - other specific parameter files for the cell\n - For more details check https://docs.openstack.org/ - project-deploy-guide/tripleo-docs/latest/features/deploy_cellv2.html - """.format(output_file=output_file) - - print(msg) diff --git a/tripleoclient/v1/overcloud_credentials.py b/tripleoclient/v1/overcloud_credentials.py deleted file mode 100644 index a79a5d08a..000000000 --- a/tripleoclient/v1/overcloud_credentials.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from osc_lib.i18n import _ - -from tripleoclient import command -from tripleoclient.workflows import deployment -from tripleoclient import utils - - -class OvercloudCredentials(command.Command): - """Create the overcloudrc files""" - - log = logging.getLogger(__name__ + ".OvercloudCredentials") - - def get_parser(self, prog_name): - parser = super(OvercloudCredentials, self).get_parser(prog_name) - parser.add_argument( - 'stack', - help=_("The name of the stack you want to " - "create rc files for.")) - parser.add_argument( - '--directory', - default=".", - nargs='?', - help=_("The directory to create the rc files.
" - "Defaults to the current directory.")) - parser.add_argument( - '--working-dir', - action='store', - help=_('The working directory that contains the input, output, ' - 'and generated files for the deployment.\n' - 'Defaults to "$HOME/overcloud-deploy/"') - ) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - if not parsed_args.working_dir: - working_dir = utils.get_default_working_dir(parsed_args.stack) - else: - working_dir = parsed_args.working_dir - rc_params = utils.get_rc_params( - working_dir) - endpoint = utils.get_overcloud_endpoint(working_dir) - admin_vip = utils.get_stack_saved_output_item( - 'KeystoneAdminVip', working_dir) - deployment.create_overcloudrc( - parsed_args.stack, endpoint, admin_vip, rc_params, - output_dir=parsed_args.directory) diff --git a/tripleoclient/v1/overcloud_deploy.py b/tripleoclient/v1/overcloud_deploy.py deleted file mode 100644 index a58509597..000000000 --- a/tripleoclient/v1/overcloud_deploy.py +++ /dev/null @@ -1,1354 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import os -import os.path -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from prettytable import PrettyTable -from pwd import getpwuid -import shutil -import time -import urllib -import yaml - -from heatclient.common import template_utils -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from tripleo_common.utils import plan as plan_utils - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import export -from tripleoclient import utils -from tripleoclient.workflows import deployment -from tripleoclient.workflows import parameters as workflow_params - -CONF = cfg.CONF - - -def _validate_args_environment_dir(dirs): - default = os.path.expanduser(constants.DEFAULT_ENV_DIRECTORY) - not_found = [d for d in dirs if not os.path.isdir(d) and d != default] - - if not_found: - raise oscexc.CommandError( - "Error: The following environment directories were not found" - ": {0}".format(", ".join(not_found))) - - -def _update_args_from_answers_file(parsed_args): - if parsed_args.answers_file is None: - return - - with open(parsed_args.answers_file, 'r') as answers_file: - answers = yaml.safe_load(answers_file) - - if parsed_args.templates is None: - parsed_args.templates = answers['templates'] - if 'environments' in answers: - if parsed_args.environment_files is not None: - answers['environments'].extend(parsed_args.environment_files) - parsed_args.environment_files = answers['environments'] - if 'roles' in answers: - if parsed_args.roles_file is None: - parsed_args.roles_file = answers['roles'] - if 'networks' in answers: - if parsed_args.networks_file is None: - parsed_args.networks_file = answers['networks'] - - -def _validate_args(parsed_args): - if parsed_args.templates is None and parsed_args.answers_file is None: - raise 
oscexc.CommandError( - "You must specify either --templates or --answers-file") - - if not parsed_args.deployed_server: - raise oscexc.CommandError( - "Error: --provision-node is no longer supported") - - if (parsed_args.baremetal_deployment - and (parsed_args.config_download_only or parsed_args.setup_only)): - raise oscexc.CommandError( - "Error: --config-download-only/--setup-only must not be used when " - "using --baremetal-deployment") - - if (parsed_args.network_config and not parsed_args.baremetal_deployment): - raise oscexc.CommandError( - "Error: --baremetal-deployment must be used when using " - "--network-config") - - if parsed_args.environment_directories: - _validate_args_environment_dir(parsed_args.environment_directories) - - not_found = [x for x in [parsed_args.networks_file, - parsed_args.answers_file, - parsed_args.vip_file] - if x and not os.path.isfile(x)] - - jinja2_envs = [] - if parsed_args.environment_files: - for env in parsed_args.environment_files: - if env.endswith(".j2.yaml"): - jinja2_envs.append(env) - continue - - # Tolerate missing file if there's a j2.yaml file that will - # be rendered in the plan but not available locally (yet) - if (not os.path.isfile(env) - and not os.path.isfile(env.replace(".yaml", ".j2.yaml"))): - not_found.append(env) - - if not_found: - raise oscexc.CommandError( - "Error: The following files were not found: {}".format( - ", ".join(not_found))) - - if jinja2_envs: - rewritten_paths = [e.replace(".j2.yaml", ".yaml") for e in jinja2_envs] - raise oscexc.CommandError( - "Error: The following jinja2 files were provided: {}. Did you " - "mean {}?".format(' -e '.join(jinja2_envs), - ' -e '.join(rewritten_paths))) - - -def _validate_vip_file(stack, working_dir): - # Check vip_file only used with network data v2 - networks_file_path = utils.get_networks_file_path(working_dir, stack) - if not utils.is_network_data_v2(networks_file_path): - raise oscexc.CommandError( - 'The --vip-file option can only be used in combination with a ' - 'network data v2 format networks file. 
The provided file {} ' - 'is network data v1 format'.format(networks_file_path)) - - -class DeployOvercloud(command.Command): - """Deploy Overcloud""" - - log = logging.getLogger(__name__ + ".DeployOvercloud") - - def _setup_clients(self, parsed_args): - self.clients = self.app.client_manager - self.orchestration_client = self.clients.orchestration - - def _update_parameters(self, args, parameters, - tht_root, user_tht_root): - parameters['RootStackName'] = args.stack - if not args.skip_deploy_identifier: - parameters['DeployIdentifier'] = int(time.time()) - else: - parameters['DeployIdentifier'] = '' - - # Check for existing passwords file - password_params_path = os.path.join( - self.working_dir, - constants.PASSWORDS_ENV_FORMAT.format(args.stack)) - if os.path.exists(password_params_path): - with open(password_params_path, 'r') as f: - passwords_env = yaml.safe_load(f.read()) - else: - passwords_env = None - - heat = None - password_params = plan_utils.generate_passwords( - None, heat, args.stack, passwords_env=passwords_env) - - # Save generated passwords file - with open(password_params_path, 'w') as f: - f.write(yaml.safe_dump(dict(parameter_defaults=password_params))) - os.chmod(password_params_path, 0o600) - - parameters.update(password_params) - - param_args = ( - ('NtpServer', 'ntp_server'), - ('NovaComputeLibvirtType', 'libvirt_type'), - ) - - # Update parameters from commandline - for param, arg in param_args: - if getattr(args, arg) is not None: - parameters[param] = getattr(args, arg) - - parameters[ - 'UndercloudHostsEntries'] = [utils.get_undercloud_host_entry()] - - parameters['CtlplaneNetworkAttributes'] = utils.get_ctlplane_attrs() - - return parameters - - def _check_limit_skiplist_warning(self, env): - if env.get('parameter_defaults').get('DeploymentServerBlacklist'): - msg = _('[WARNING] DeploymentServerBlacklist is defined and will ' - 'be ignored because --limit has been specified.') - self.log.warning(msg) - - def _heat_deploy(self, stack_name, template_path, - env_files, timeout, tht_root, env, - run_validations, - roles_file, - env_files_tracker=None, - deployment_options=None): - """Verify the Baremetal nodes are available and do a stack update""" - - self.log.debug("Getting template contents from plan %s" % stack_name) - - template_files, template = template_utils.get_template_contents( - template_file=template_path) - files = dict(list(template_files.items()) + list(env_files.items())) - - workflow_params.check_deprecated_parameters( - self.clients, - stack_name, - template, - files, - env_files_tracker, - self.working_dir) - - self.log.info("Deploying templates in the directory {0}".format( - os.path.abspath(tht_root))) - deployment.deploy_without_plan( - self.clients, stack_name, - template, files, env_files_tracker, - self.log, self.working_dir) - - def create_template_dirs(self, parsed_args): - tht_root = os.path.abspath(parsed_args.templates) - new_tht_root = "%s/tripleo-heat-templates" % self.working_dir - self.log.debug("Creating working templates tree in %s" - % new_tht_root) - roles_file_path = utils.get_roles_file_path(self.working_dir, - parsed_args.stack) - networks_file_path = utils.get_networks_file_path(self.working_dir, - parsed_args.stack) - shutil.rmtree(new_tht_root, ignore_errors=True) - shutil.copytree(tht_root, new_tht_root, symlinks=True) - utils.jinja_render_files(self.log, - templates=parsed_args.templates, - working_dir=new_tht_root, - roles_file=roles_file_path, - networks_file=networks_file_path, - base_path=new_tht_root) - return 
new_tht_root, tht_root - - def create_env_files(self, parsed_args, - new_tht_root, user_tht_root): - self.log.debug("Creating Environment files") - # A dictionary to store resource registry types that are internal, - # and should not be overridden in user provided environments. - protected_overrides = {'registry_entries': dict()} - created_env_files = [ - os.path.join(new_tht_root, constants.DEFAULT_RESOURCE_REGISTRY)] - - parameters = utils.build_enabled_sevices_image_params( - created_env_files, parsed_args, new_tht_root, user_tht_root, - self.working_dir) - - self._update_parameters( - parsed_args, parameters, new_tht_root, user_tht_root) - - param_env = utils.create_parameters_env( - parameters, new_tht_root, parsed_args.stack) - created_env_files.extend(param_env) - - if parsed_args.baremetal_deployment is not None: - created_env_files.extend( - self._provision_networks(parsed_args, new_tht_root, - protected_overrides)) - created_env_files.extend( - self._provision_virtual_ips(parsed_args, new_tht_root, - protected_overrides)) - self._unprovision_baremetal(parsed_args) - created_env_files.extend( - self._provision_baremetal(parsed_args, new_tht_root, - protected_overrides)) - - user_environments = [] - if parsed_args.environment_directories: - user_environments.extend(utils.load_environment_directories( - parsed_args.environment_directories)) - - if parsed_args.environment_files: - user_environments.extend(parsed_args.environment_files) - - if (not parsed_args.disable_protected_resource_types - and user_environments): - rewritten_user_environments = [] - for env_path in user_environments: - env_path, abs_env_path = utils.rewrite_env_path( - env_path, new_tht_root, user_tht_root) - rewritten_user_environments.append((env_path, abs_env_path)) - utils.check_prohibited_overrides(protected_overrides, - rewritten_user_environments) - utils.duplicate_param_check(user_environments=user_environments) - created_env_files.extend(user_environments) - - return created_env_files - - def deploy_tripleo_heat_templates(self, parsed_args, - new_tht_root, user_tht_root, - created_env_files): - """Deploy the fixed templates in TripleO Heat Templates""" - - self.log.info("Processing templates in the directory {0}".format( - os.path.abspath(new_tht_root))) - - deployment_options = {} - if parsed_args.deployment_python_interpreter: - deployment_options['ansible_python_interpreter'] = \ - parsed_args.deployment_python_interpreter - - self.log.debug("Processing environment files %s" % created_env_files) - env_files_tracker = [] - env_files, env = utils.process_multiple_environments( - created_env_files, new_tht_root, user_tht_root, - env_files_tracker=env_files_tracker, - cleanup=(not parsed_args.no_cleanup)) - - # Copy the env_files to tmp folder for archiving - utils.copy_env_files(env_files, new_tht_root) - - if parsed_args.limit: - # check if skip list is defined while using --limit and throw a - # warning if necessary - self._check_limit_skiplist_warning(env) - - # check if we're trying to deploy ceph during the overcloud deployment - utils.check_deployed_ceph_stage(env) - - old_stack_env = utils.get_saved_stack_env( - self.working_dir, parsed_args.stack) - if old_stack_env: - if not parsed_args.disable_validations: - ceph_deployed = env.get('resource_registry', {}).get( - 'OS::TripleO::Services::CephMon', 'OS::Heat::None') - ceph_external = env.get('resource_registry', {}).get( - 'OS::TripleO::Services::CephExternal', 'OS::Heat::None') - # note (fpantano) if ceph is not TripleO deployed and no - # 
external ceph cluster are present, there's no reason to - # make this check and we can simply ignore it - if (ceph_deployed != "OS::Heat::None" - or ceph_external != "OS::Heat::None"): - utils.check_ceph_fsid_matches_env_files(old_stack_env, env) - # upgrades: check if swift is deployed - utils.check_swift_and_rgw(old_stack_env, env, - self.__class__.__name__) - # check migration to service vips managed by servce - utils.check_service_vips_migrated_to_service(env) - - # check if ceph-ansible env is present - utils.check_ceph_ansible(env.get('resource_registry', {}), - self.__class__.__name__) - utils.check_neutron_resources(env) - - self._try_overcloud_deploy_with_compat_yaml( - new_tht_root, - parsed_args.stack, env_files, - parsed_args.timeout, env, - parsed_args.run_validations, - parsed_args.roles_file, - env_files_tracker=env_files_tracker, - deployment_options=deployment_options) - - def _try_overcloud_deploy_with_compat_yaml(self, tht_root, - stack_name, - env_files, timeout, - env, run_validations, - roles_file, - env_files_tracker=None, - deployment_options=None): - overcloud_yaml = os.path.join(tht_root, constants.OVERCLOUD_YAML_NAME) - try: - self._heat_deploy(stack_name, overcloud_yaml, - env_files, timeout, - tht_root, env, - run_validations, - roles_file, - env_files_tracker=env_files_tracker, - deployment_options=deployment_options) - except oscexc.CommandError as e: - messages = 'Failed to deploy: %s' % str(e) - raise ValueError(messages) - - def _deploy_postconfig(self, parsed_args): - self.log.debug("_deploy_postconfig(%s)" % parsed_args) - - overcloud_endpoint = utils.get_overcloud_endpoint(self.working_dir) - # NOTE(jaosorior): The overcloud endpoint can contain an IP address or - # an FQDN depending on how what it's configured to output in the - # tripleo-heat-templates. Such a configuration can be done by - # overriding the EndpointMap through parameter_defaults. 
- overcloud_ip_or_fqdn = urllib.parse.urlparse( - overcloud_endpoint).hostname - - keystone_admin_ip = utils.get_stack_saved_output_item( - 'KeystoneAdminVip', self.working_dir) - no_proxy = os.environ.get('no_proxy', overcloud_ip_or_fqdn) - no_proxy_list = map(utils.bracket_ipv6, - [no_proxy, overcloud_ip_or_fqdn, - keystone_admin_ip]) - os.environ['no_proxy'] = ','.join([x for x in no_proxy_list if x]) - - utils.remove_known_hosts(overcloud_ip_or_fqdn) - - def _provision_baremetal(self, parsed_args, tht_root, protected_overrides): - - baremetal_file = utils.get_baremetal_file_path(self.working_dir, - parsed_args.stack) - if not baremetal_file: - return [] - - baremetal_file_dir = os.path.dirname(baremetal_file) - with open(baremetal_file, 'r') as fp: - roles = yaml.safe_load(fp) - - utils.validate_roles_playbooks(baremetal_file_dir, roles) - - key = self.get_key_pair(parsed_args) - with open('{}.pub'.format(key), 'rt') as fp: - ssh_key = fp.read() - - output_path = utils.build_user_env_path( - 'baremetal-deployed.yaml', - tht_root - ) - extra_vars = { - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "baremetal_deployed_path": output_path, - "ssh_private_key_file": key, - "ssh_public_keys": ssh_key, - "ssh_user_name": parsed_args.overcloud_ssh_user, - "manage_network_ports": True, - "configure_networking": parsed_args.network_config, - "working_dir": self.working_dir, - "templates": parsed_args.templates, - } - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-overcloud-node-provision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - utils.run_role_playbooks(self, self.working_dir, baremetal_file_dir, - roles, parsed_args.network_config) - - utils.extend_protected_overrides(protected_overrides, output_path) - - return [output_path] - - def _unprovision_baremetal(self, parsed_args): - - baremetal_file = utils.get_baremetal_file_path(self.working_dir, - parsed_args.stack) - if not baremetal_file: - return - - with open(baremetal_file, 'r') as fp: - roles = yaml.safe_load(fp) - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-overcloud-node-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "prompt": False, - "manage_network_ports": True, - } - ) - - def _provision_networks(self, parsed_args, tht_root, protected_overrides): - # Parse the network data, if any network have 'ip_subnet' or - # 'ipv6_subnet' keys this is not a network-v2 format file. In this - # case do nothing. 
- networks_file_path = utils.get_networks_file_path( - self.working_dir, parsed_args.stack) - - if not utils.is_network_data_v2(networks_file_path): - return [] - - output_path = utils.build_user_env_path( - 'networks-deployed.yaml', - tht_root) - extra_vars = { - "network_data_path": networks_file_path, - "network_deployed_path": output_path, - "overwrite": True, - "templates": parsed_args.templates, - } - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-overcloud-network-provision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - - utils.extend_protected_overrides(protected_overrides, output_path) - - return [output_path] - - def _provision_virtual_ips(self, parsed_args, tht_root, - protected_overrides): - networks_file_path = utils.get_networks_file_path(self.working_dir, - parsed_args.stack) - if not utils.is_network_data_v2(networks_file_path): - return [] - - vip_file_path = utils.get_vip_file_path(self.working_dir, - parsed_args.stack) - - output_path = utils.build_user_env_path( - 'virtual-ips-deployed.yaml', - tht_root) - - extra_vars = { - "stack_name": parsed_args.stack, - "vip_data_path": vip_file_path, - "vip_deployed_path": output_path, - "overwrite": True, - "templates": parsed_args.templates, - } - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-overcloud-network-vip-provision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - - utils.extend_protected_overrides(protected_overrides, output_path) - - return [output_path] - - def _export_stack(self, parsed_args, should_filter, - config_download_dir, export_file): - # Create overcloud export - data = export.export_overcloud( - self.working_dir, - parsed_args.stack, True, should_filter, - config_download_dir) - # write the exported data - with open(export_file, 'w') as f: - yaml.safe_dump(data, f, default_flow_style=False) - os.chmod(export_file, 0o600) - - def setup_ephemeral_heat(self, parsed_args): - self.log.info("Using ephemeral heat for stack operation") - self.heat_launcher = utils.get_heat_launcher( - parsed_args.heat_type, - api_container_image=parsed_args.heat_container_api_image, - engine_container_image=parsed_args.heat_container_engine_image, - heat_dir=os.path.join(self.working_dir, - 'heat-launcher'), - use_tmp_dir=False, - rm_heat=parsed_args.rm_heat, - skip_heat_pull=parsed_args.skip_heat_pull) - self.orchestration_client = utils.launch_heat(self.heat_launcher) - self.clients.orchestration = self.orchestration_client - - def get_parser(self, prog_name): - # add_help doesn't work properly, set it to False: - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - parser.add_argument( - '--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES, - help=_("The directory containing the Heat templates to deploy"), - ) - parser.add_argument('--stack', - help=_("Stack name to create or update"), - default='overcloud') - parser.add_argument('--timeout', '-t', metavar='', - type=int, default=240, - help=_('Deployment timeout in minutes.')) - parser.add_argument('--libvirt-type', - choices=['kvm', 'qemu'], - help=_('Libvirt domain type.')) - parser.add_argument('--ntp-server', - help=_('The NTP for overcloud nodes. 
'))
-        parser.add_argument(
-            '--no-proxy',
-            default=os.environ.get('no_proxy', ''),
-            help=_('A comma-separated list of hosts that should not be '
-                   'proxied.')
-        )
-        parser.add_argument(
-            '--overcloud-ssh-user',
-            default='tripleo-admin',
-            help=_('User for ssh access to overcloud nodes')
-        )
-        parser.add_argument(
-            '--overcloud-ssh-key',
-            default=None,
-            help=_('Key path for ssh access to overcloud nodes. When '
-                   'undefined the key will be autodetected.')
-        )
-        parser.add_argument(
-            '--overcloud-ssh-network',
-            help=_('Network name to use for ssh access to overcloud nodes.'),
-            default='ctlplane'
-        )
-        parser.add_argument(
-            '--overcloud-ssh-enable-timeout',
-            help=_('This option no longer has any effect.'),
-            type=int,
-            default=constants.ENABLE_SSH_ADMIN_TIMEOUT
-        )
-        parser.add_argument(
-            '--overcloud-ssh-port-timeout',
-            help=_('Timeout for the ssh port to become active.'),
-            type=int,
-            default=constants.ENABLE_SSH_ADMIN_SSH_PORT_TIMEOUT
-        )
-        parser.add_argument(
-            '--environment-file', '-e', metavar='',
-            action='append', dest='environment_files',
-            help=_('Environment files to be passed to the heat stack-create '
-                   'or heat stack-update command. (Can be specified more '
-                   'than once.)')
-        )
-        parser.add_argument(
-            '--environment-directory', metavar='',
-            action='append', dest='environment_directories',
-            default=[os.path.expanduser(constants.DEFAULT_ENV_DIRECTORY)],
-            help=_('Environment file directories that are automatically '
-                   'added to the heat stack-create or heat stack-update '
-                   'commands. Can be specified more than once. Files in '
-                   'directories are loaded in ascending sort order.')
-        )
-        parser.add_argument(
-            '--roles-file', '-r', dest='roles_file',
-            help=_('Roles file, overrides the default %s in the --templates '
-                   'directory. May be an absolute path or the path relative '
-                   'to --templates') % constants.OVERCLOUD_ROLES_FILE
-        )
-        parser.add_argument(
-            '--networks-file', '-n', dest='networks_file',
-            help=_('Networks file, overrides the default %s in the '
-                   '--templates directory') % constants.OVERCLOUD_NETWORKS_FILE
-        )
-        parser.add_argument(
-            '--vip-file', dest='vip_file',
-            help=_('Configuration file describing the network Virtual IPs.'))
-        parser.add_argument(
-            '--no-cleanup', action='store_true',
-            help=_('Don\'t cleanup temporary files, just log their location')
-        )
-        parser.add_argument(
-            '--update-plan-only',
-            action='store_true',
-            help=_('DEPRECATED: Only update the plan. Do not perform the '
-                   'actual deployment. NOTE: Will move to a discrete command '
-                   'in a future release. Not supported anymore.')
-        )
-        parser.add_argument(
-            '--validation-errors-nonfatal',
-            dest='validation_errors_fatal',
-            action='store_false',
-            default=True,
-            help=_('Allow the deployment to continue in spite of validation '
-                   'errors. Note that attempting deployment while errors '
-                   'exist is likely to fail.')
-        )
-        parser.add_argument(
-            '--validation-warnings-fatal',
-            action='store_true',
-            default=False,
-            help=_('Exit if there are warnings from the configuration '
-                   'pre-checks.')
-        )
-        parser.add_argument(
-            '--disable-validations',
-            action='store_true',
-            default=True,
-            help=_('DEPRECATED. Disable the pre-deployment validations '
-                   'entirely. These validations are the built-in '
-                   'pre-deployment validations. To enable external '
-                   'validations from tripleo-validations, '
-                   'use the --run-validations flag. These validations are '
-                   'now run via the external validations in '
-                   'tripleo-validations.'))
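The --environment-directory help above promises that files are loaded in ascending sort order. An illustrative collection routine (not the client's actual loader):

```python
import glob
import os


def collect_environment_files(directories, pattern='*.yaml'):
    # Gather environment files from each directory in ascending sort
    # order, preserving the order in which directories were given.
    env_files = []
    for directory in directories:
        path = os.path.join(os.path.expanduser(directory), pattern)
        env_files.extend(sorted(glob.glob(path)))
    return env_files
```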
-        parser.add_argument(
-            '--inflight-validations',
-            action='store_true',
-            default=False,
-            dest='inflight',
-            help=_('Activate in-flight validations during the deploy. '
-                   'In-flight validations provide a robust way to ensure '
-                   'deployed services are running right after their '
-                   'activation. Defaults to False.')
-        )
-        parser.add_argument(
-            '--dry-run',
-            action='store_true',
-            default=False,
-            help=_('Only run validations, but do not apply any changes.')
-        )
-        parser.add_argument(
-            '--run-validations',
-            action='store_true',
-            default=False,
-            help=_('Run external validations from the tripleo-validations '
-                   'project.'))
-        parser.add_argument(
-            '--skip-postconfig',
-            action='store_true',
-            default=False,
-            help=_('Skip the overcloud post-deployment configuration.')
-        )
-        parser.add_argument(
-            '--force-postconfig',
-            action='store_true',
-            default=False,
-            help=_('Force the overcloud post-deployment configuration.')
-        )
-        parser.add_argument(
-            '--skip-deploy-identifier',
-            action='store_true',
-            default=False,
-            help=_('Skip generation of a unique identifier for the '
-                   'DeployIdentifier parameter. The software configuration '
-                   'deployment steps will only be triggered if there is an '
-                   'actual change to the configuration. This option should '
-                   'be used with caution, and only if there is confidence '
-                   'that the software configuration does not need to be '
-                   'run, such as when scaling out certain roles.')
-        )
-        parser.add_argument(
-            '--answers-file',
-            help=_('Path to a YAML file with arguments and parameters.')
-        )
-        parser.add_argument(
-            '--disable-password-generation',
-            action='store_true',
-            default=False,
-            help=_('Disable password generation.')
-        )
-        parser.add_argument(
-            '--deployed-server',
-            action='store_true',
-            default=True,
-            help=_('DEPRECATED: Use pre-provisioned overcloud nodes. '
-                   'Now the default and this CLI option has no effect.')
-        )
-        parser.add_argument(
-            '--provision-nodes',
-            action='store_false',
-            dest='deployed_server',
-            default=True,
-            help=_('DEPRECATED: Provision overcloud nodes with heat. '
-                   'This method is no longer supported.')
-        )
-        parser.add_argument(
-            '--config-download',
-            action='store_true',
-            default=True,
-            help=_('DEPRECATED: Run deployment via config-download '
-                   'mechanism. This is now the default, and this CLI '
-                   'option has no effect.')
-        )
-        parser.add_argument(
-            '--no-config-download',
-            '--stack-only',
-            action='store_true',
-            default=False,
-            dest='stack_only',
-            help=_('Disable the config-download workflow and only create '
-                   'the stack and download the config. No software '
-                   'configuration, setup, or any changes will be applied '
-                   'to overcloud nodes.')
-        )
-        parser.add_argument(
-            '--config-download-only',
-            action='store_true',
-            default=False,
-            help=_('Disable the stack create and setup, and only run the '
-                   'config-download workflow to apply the software '
-                   'configuration. Requires that config-download setup '
-                   'was previously completed, either with --stack-only '
-                   'and --setup-only or a full deployment')
-        )
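Several of the deprecated options above come in pairs that write opposite values into one destination (--deployed-server / --provision-nodes, and --config-download against --no-config-download). A standalone illustration of that argparse pattern:

```python
import argparse

parser = argparse.ArgumentParser()
# Two flags, one dest: the default is True, and only the store_false
# flag can change it, mirroring the deprecated pairs above.
parser.add_argument('--deployed-server', dest='deployed_server',
                    action='store_true', default=True)
parser.add_argument('--provision-nodes', dest='deployed_server',
                    action='store_false')

print(parser.parse_args([]).deployed_server)                     # True
print(parser.parse_args(['--provision-nodes']).deployed_server)  # False
```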
-        parser.add_argument(
-            '--setup-only',
-            action='store_true',
-            default=False,
-            help=_('Disable the stack and config-download workflow to apply '
-                   'the software configuration and only run the setup to '
-                   'enable ssh connectivity.')
-        )
-        parser.add_argument(
-            '--config-dir',
-            dest='config_dir',
-            default=None,
-            help=_('The directory where the configuration files will be '
-                   'pushed'),
-        )
-        parser.add_argument(
-            '--config-type',
-            dest='config_type',
-            type=list,
-            default=None,
-            help=_('Only used when "--setup-only" is invoked. '
-                   'Type of object config to be extracted from the '
-                   'deployment, defaults to all keys available'),
-        )
-        parser.add_argument(
-            '--no-preserve-config',
-            dest='preserve_config_dir',
-            action='store_false',
-            default=True,
-            help=('Only used when "--setup-only" is invoked. '
-                  'If specified, will delete and recreate the --config-dir '
-                  'if it already exists. Default is to use the existing dir '
-                  'location and overwrite files. Files in --config-dir not '
-                  'from the stack will be preserved by default.')
-        )
-        parser.add_argument(
-            '--output-dir',
-            action='store',
-            default=None,
-            help=_('Directory to use for saved output when using '
-                   '--config-download. When not '
-                   'specified, /config-download will be used.')
-        )
-        parser.add_argument(
-            '--override-ansible-cfg',
-            action='store',
-            default=None,
-            help=_('Path to ansible configuration file. The configuration '
-                   'in the file will override any configuration used by '
-                   'config-download by default.')
-        )
-        parser.add_argument(
-            '--config-download-timeout',
-            action='store',
-            type=int,
-            default=None,
-            help=_('Timeout (in minutes) to use for config-download steps. '
-                   'If unset, will default to however much time is left '
-                   'over from the --timeout parameter after the stack '
-                   'operation.')
-        )
-        parser.add_argument('--deployment-python-interpreter', default=None,
-                            help=_('The path to python interpreter to use '
-                                   'for the deployment actions. This may '
-                                   'need to be used if deploying on a '
-                                   'python2 host from a python3 system or '
-                                   'vice versa.'))
-        parser.add_argument(
-            '-b', '--baremetal-deployment',
-            metavar='',
-            nargs='?',
-            const=True,
-            help=_('Deploy baremetal nodes, network and virtual IP addresses '
-                   'as defined in baremetal_deployment.yaml along with '
-                   'overcloud. If no baremetal_deployment YAML file is given, '
-                   'the tripleo--baremetal-deployment.yaml file '
-                   'in the working-dir will be used.'))
-        parser.add_argument('--network-config',
-                            help=_('Apply network config to provisioned '
-                                   'nodes.'),
-                            default=False,
-                            action="store_true")
-        parser.add_argument(
-            '--limit',
-            action='store',
-            default=None,
-            help=_("A string that identifies a single node or a "
-                   "comma-separated list of nodes the config-download "
-                   "Ansible playbook execution will be limited to. "
-                   "For example: --limit \"compute-0,compute-1,compute-5\".")
-        )
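--limit takes a comma-separated node string that is later normalized by utils.playbook_limit_parse before being handed to ansible-playbook. A rough sketch of what such normalization could look like (not the actual implementation):

```python
def playbook_limit_sketch(limit_nodes):
    # Normalize a comma-separated --limit string into an Ansible-style
    # colon-joined limit expression, tolerating stray whitespace.
    if not limit_nodes:
        return None
    return ':'.join(node.strip() for node in limit_nodes.split(','))


print(playbook_limit_sketch('compute-0, compute-1,compute-5'))
# compute-0:compute-1:compute-5
```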
-        parser.add_argument(
-            '--tags',
-            action='store',
-            default=None,
-            help=_('A list of tags to use when running the config-download '
-                   'ansible-playbook command.')
-        )
-        parser.add_argument(
-            '--skip-tags',
-            action='store',
-            default=None,
-            help=_('A list of tags to skip when running the '
-                   'config-download ansible-playbook command.')
-        )
-        parser.add_argument(
-            '--ansible-forks',
-            action='store',
-            default=None,
-            type=int,
-            help=_('The number of Ansible forks to use for the '
-                   'config-download ansible-playbook command.')
-        )
-        parser.add_argument(
-            '--disable-container-prepare',
-            action='store_true',
-            default=False,
-            help=_('Disable the container preparation actions to prevent '
-                   'container tags from being updated and new containers '
-                   'from being fetched. If you skip this but do not have '
-                   'the container parameters configured, the deployment '
-                   'action may fail.')
-        )
-        parser.add_argument(
-            '--working-dir',
-            action='store',
-            help=_('The working directory for the deployment where all '
-                   'input, output, and generated files will be stored.\n'
-                   'Defaults to "$HOME/overcloud-deploy/"')
-        )
-        parser.add_argument(
-            '--heat-type',
-            action='store',
-            default='pod',
-            choices=['pod', 'container', 'native'],
-            help=_('The type of Heat process to use to execute '
-                   'the deployment.\n'
-                   'pod (Default): Use an ephemeral Heat pod.\n'
-                   'container (Experimental): Use an ephemeral Heat '
-                   'container.\n'
-                   'native (Experimental): Use an ephemeral Heat process.')
-        )
-        parser.add_argument(
-            '--heat-container-api-image',
-            metavar='',
-            dest='heat_container_api_image',
-            default=constants.DEFAULT_EPHEMERAL_HEAT_API_CONTAINER,
-            help=_('The container image to use when launching the heat-api '
-                   'process. Only used when --heat-type=pod. '
-                   'Defaults to: {}'.format(
-                       constants.DEFAULT_EPHEMERAL_HEAT_API_CONTAINER))
-        )
-        parser.add_argument(
-            '--heat-container-engine-image',
-            metavar='',
-            dest='heat_container_engine_image',
-            default=constants.DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER,
-            help=_('The container image to use when launching the '
-                   'heat-engine process. Only used when --heat-type=pod. '
-                   'Defaults to: {}'.format(
-                       constants.DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER))
-        )
-        parser.add_argument(
-            '--rm-heat',
-            action='store_true',
-            default=False,
-            help=_('If specified and --heat-type is container or pod, '
-                   'any existing container or pod of a previous '
-                   'ephemeral Heat process will be deleted first. '
-                   'Ignored if --heat-type is native.')
-        )
-        parser.add_argument(
-            '--skip-heat-pull',
-            action='store_true',
-            default=False,
-            help=_('When --heat-type is pod or container, assume '
-                   'the container image has already been pulled.')
-        )
-        parser.add_argument(
-            '--disable-protected-resource-types',
-            action='store_true',
-            default=False,
-            help=_('Disable protected resource type overrides. Resource '
-                   'types that are used internally are protected, and cannot '
-                   'be overridden in the user environment. Setting this '
-                   'argument disables the protection, allowing the protected '
-                   'resource types to be overridden in the user environment.')
-        )
-        parser.add_argument(
-            '-y', '--yes', default=False,
-            action='store_true',
-            help=_('Use -y or --yes to skip any confirmation required before '
-                   'the deploy operation. Use this with caution!')
-        )
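-y/--yes short-circuits the interactive confirmation, and the same gate guards the update and upgrade commands later in this diff. In sketch form (a stand-in for utils.prompt_user_for_confirmation, not its actual implementation):

```python
def confirm(prompt, assume_yes=False):
    # Skip the prompt entirely when --yes was passed; otherwise require
    # an explicit affirmative answer.
    if assume_yes:
        return True
    reply = input('%s [y/N] ' % prompt).strip().lower()
    return reply in ('y', 'yes')
```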
-        parser.add_argument(
-            '--allow-deprecated-network-data', default=False,
-            action='store_true',
-            help=_('Set this to allow using deprecated network data YAML '
-                   'definition schema.')
-        )
-
-        return parser
-
-    def take_action(self, parsed_args):
-        logging.register_options(CONF)
-        logging.setup(CONF, '')
-        self.log.debug("take_action(%s)" % parsed_args)
-
-        if (parsed_args.networks_file and
-                (not parsed_args.yes
-                 and not parsed_args.allow_deprecated_network_data)):
-            if not utils.is_network_data_v2(parsed_args.networks_file):
-                confirm = utils.prompt_user_for_confirmation(
-                    'DEPRECATED network data definition {} provided. Please '
-                    'update the network data definition to version 2.\n'
-                    'Do you still wish to continue with deployment [y/N]'
-                    .format(parsed_args.networks_file),
-                    self.log)
-                if not confirm:
-                    raise oscexc.CommandError("Action not confirmed, "
-                                              "exiting.")
-
-        if not parsed_args.working_dir:
-            self.working_dir = utils.get_default_working_dir(
-                parsed_args.stack)
-        else:
-            self.working_dir = parsed_args.working_dir
-        utils.makedirs(self.working_dir)
-        utils.check_deploy_backups(self.working_dir)
-
-        if parsed_args.update_plan_only:
-            raise exceptions.DeploymentError(
-                'The --update-plan-only option is no longer supported.')
-
-        deploy_status = 'DEPLOY_SUCCESS'
-        deploy_message = 'successfully'
-
-        self._setup_clients(parsed_args)
-
-        _update_args_from_answers_file(parsed_args)
-
-        _validate_args(parsed_args)
-
-        # Make a copy of the files provided on the command line in the
-        # working dir. If the command is re-run without providing the
-        # argument, the "backup" from the previous run in the working dir
-        # is used.
-        utils.update_working_dir_defaults(self.working_dir, parsed_args)
-
-        if parsed_args.vip_file:
-            _validate_vip_file(parsed_args.stack, self.working_dir)
-
-        # Warn if a deprecated service is enabled and ask the user whether
-        # deployment should still continue.
-        if parsed_args.environment_files:
-            utils.check_deprecated_service_is_enabled(
-                parsed_args.environment_files)
-
-        if parsed_args.dry_run:
-            self.log.info("Validation Finished")
-            return
-
-        self.heat_launcher = None
-        start = time.time()
-
-        new_tht_root, user_tht_root = \
-            self.create_template_dirs(parsed_args)
-        created_env_files = self.create_env_files(
-            parsed_args, new_tht_root, user_tht_root)
-
-        # full_deploy means we're doing a full deployment,
-        # i.e., no --*-only args were passed
-        full_deploy = not (parsed_args.stack_only or parsed_args.setup_only or
-                           parsed_args.config_download_only)
-        # do_stack is True when:
-        # --stack-only OR
-        # a full deployment
-        do_stack = (parsed_args.stack_only or full_deploy)
-        # do_setup is True when:
-        # --setup-only OR
-        # a full deployment
-        do_setup = parsed_args.setup_only or full_deploy
-        # do_config_download is True when:
-        # --config-download-only OR
-        # a full deployment
-        do_config_download = parsed_args.config_download_only or full_deploy
-
-        config_download_dir = parsed_args.output_dir or \
-            os.path.join(self.working_dir, "config-download")
-        horizon_url = None
-        overcloud_endpoint = None
-        old_rcpath = None
-        rcpath = None
-
-        # All code within this "try" block requires Heat, and no other code
-        # outside the block should require Heat. With ephemeral Heat, the
-        # Heat pods will be cleaned up in the "finally" clause, such that
-        # it's not running during later parts of overcloud deploy.
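The phase flags derived above reduce to a small truth table: with no --*-only option every phase runs, otherwise only the requested one does. Restated as a self-contained function:

```python
def deployment_phases(stack_only=False, setup_only=False,
                      config_download_only=False):
    # full_deploy is True only when no --*-only flag was passed, in
    # which case all three phases run.
    full_deploy = not (stack_only or setup_only or config_download_only)
    return {
        'do_stack': stack_only or full_deploy,
        'do_setup': setup_only or full_deploy,
        'do_config_download': config_download_only or full_deploy,
    }


print(deployment_phases(stack_only=True))
# {'do_stack': True, 'do_setup': False, 'do_config_download': False}
```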
- self.log.info("Deploying overcloud.") - deployment.set_deployment_status( - parsed_args.stack, - status='DEPLOYING', - working_dir=self.working_dir) - - try: - if do_stack: - self.setup_ephemeral_heat(parsed_args) - - self.deploy_tripleo_heat_templates( - parsed_args, new_tht_root, - user_tht_root, created_env_files) - - stack = utils.get_stack( - self.orchestration_client, parsed_args.stack) - utils.save_stack(stack, self.working_dir) - - horizon_url = deployment.get_horizon_url( - stack=stack.stack_name, - heat_type=parsed_args.heat_type, - working_dir=self.working_dir) - - overcloud_endpoint = utils.get_overcloud_endpoint( - self.working_dir) - overcloud_admin_vip = utils.get_stack_saved_output_item( - 'KeystoneAdminVip', self.working_dir) - rc_params = utils.get_rc_params(self.working_dir) - - # For backwards compatibility, we will also write overcloudrc - # to $HOME and then self.working_dir. - old_rcpath = deployment.create_overcloudrc( - parsed_args.stack, overcloud_endpoint, overcloud_admin_vip, - rc_params, parsed_args.no_proxy) - rcpath = deployment.create_overcloudrc( - parsed_args.stack, overcloud_endpoint, overcloud_admin_vip, - rc_params, parsed_args.no_proxy, self.working_dir) - - # Download config - config_dir = parsed_args.config_dir or config_download_dir - config_type = parsed_args.config_type - preserve_config_dir = parsed_args.preserve_config_dir - key_file = utils.get_key(parsed_args.stack) - extra_vars = { - 'plan': parsed_args.stack, - 'config_dir': config_dir, - 'preserve_config': preserve_config_dir, - 'output_dir': config_download_dir, - 'ansible_ssh_private_key_file': key_file, - 'ssh_network': parsed_args.overcloud_ssh_network, - 'python_interpreter': - parsed_args.deployment_python_interpreter, - } - if parsed_args.config_type: - extra_vars['config_type'] = config_type - - playbook = 'cli-config-download.yaml' - ansible_work_dir = os.path.join( - self.working_dir, os.path.splitext(playbook)[0]) - utils.run_ansible_playbook( - playbook='cli-config-download.yaml', - inventory='localhost,', - workdir=ansible_work_dir, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - reproduce_command=True, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars - ) - except (BaseException, Exception): - with excutils.save_and_reraise_exception(): - deploy_status = 'DEPLOY_FAILED' - deploy_message = 'with error' - deployment.set_deployment_status( - parsed_args.stack, - status=deploy_status, - working_dir=self.working_dir) - - finally: - if self.heat_launcher: - self.log.info("Stopping ephemeral heat.") - utils.kill_heat(self.heat_launcher) - utils.rm_heat(self.heat_launcher) - try: - if do_setup: - deployment.get_hosts_and_enable_ssh_admin( - parsed_args.stack, - parsed_args.overcloud_ssh_network, - parsed_args.overcloud_ssh_user, - self.get_key_pair(parsed_args), - parsed_args.overcloud_ssh_port_timeout, - self.working_dir, - verbosity=utils.playbook_verbosity(self=self), - heat_type=parsed_args.heat_type - ) - - if do_config_download: - if parsed_args.config_download_timeout: - timeout = parsed_args.config_download_timeout - else: - used = int((time.time() - start) // 60) - timeout = parsed_args.timeout - used - if timeout <= 0: - raise exceptions.DeploymentError( - 'Deployment timed out after %sm' % used) - - deployment_options = {} - if parsed_args.deployment_python_interpreter: - deployment_options['ansible_python_interpreter'] = \ - parsed_args.deployment_python_interpreter - - deployment.make_config_download_dir(config_download_dir, - 
parsed_args.stack)
-
-                deployment.config_download(
-                    self.log,
-                    self.clients,
-                    parsed_args.stack,
-                    parsed_args.overcloud_ssh_network,
-                    config_download_dir,
-                    parsed_args.override_ansible_cfg,
-                    timeout=parsed_args.overcloud_ssh_port_timeout,
-                    verbosity=utils.playbook_verbosity(self=self),
-                    deployment_options=deployment_options,
-                    in_flight_validations=parsed_args.inflight,
-                    deployment_timeout=timeout,
-                    tags=parsed_args.tags,
-                    skip_tags=parsed_args.skip_tags,
-                    limit_hosts=utils.playbook_limit_parse(
-                        limit_nodes=parsed_args.limit
-                    ),
-                    forks=parsed_args.ansible_forks,
-                    denyed_hostnames=utils.get_stack_saved_output_item(
-                        'BlacklistedHostnames', self.working_dir))
-                deployment.set_deployment_status(
-                    parsed_args.stack,
-                    status=deploy_status,
-                    working_dir=self.working_dir)
-        except (BaseException, Exception):
-            with excutils.save_and_reraise_exception():
-                deploy_status = 'DEPLOY_FAILED'
-                deploy_message = 'with error'
-                deployment.set_deployment_status(
-                    parsed_args.stack,
-                    status=deploy_status,
-                    working_dir=self.working_dir)
-        finally:
-            try:
-                # Run postconfig on create or force
-                if ((stack or parsed_args.force_postconfig)
-                        and not parsed_args.skip_postconfig):
-                    self._deploy_postconfig(parsed_args)
-            except Exception as e:
-                self.log.error('Exception during postconfig')
-                self.log.error(e)
-
-            try:
-                # Copy clouds.yaml to the cloud user directory
-                user = \
-                    getpwuid(os.stat(constants.CLOUD_HOME_DIR).st_uid).pw_name
-                utils.copy_clouds_yaml(user)
-            except Exception as e:
-                self.log.error('Exception creating clouds.yaml')
-                self.log.error(e)
-
-            try:
-                utils.create_tempest_deployer_input(
-                    output_dir=self.working_dir)
-            except Exception as e:
-                self.log.error('Exception creating tempest configuration.')
-                self.log.error(e)
-
-            try:
-                if do_stack:
-                    # Create overcloud export
-                    self._export_stack(
-                        parsed_args, False,
-                        config_download_dir,
-                        os.path.join(
-                            self.working_dir, "%s-export.yaml" %
-                            parsed_args.stack))
-                    # Create overcloud cell export
-                    self._export_stack(
-                        parsed_args, True,
-                        config_download_dir,
-                        os.path.join(
-                            self.working_dir, "%s-cell-export.yaml" %
-                            parsed_args.stack))
-            except Exception as e:
-                self.log.error('Exception creating overcloud export.')
-                self.log.error(e)
-
-            if do_config_download:
-                print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
-                print("Overcloud Horizon Dashboard URL: {0}".format(
-                    horizon_url))
-                print("Overcloud rc file: {} and {}".format(
-                    rcpath, old_rcpath))
-                print("Overcloud Deployed {0}".format(deploy_message))
-
-            try:
-                if parsed_args.output_dir:
-                    ansible_dir = config_download_dir
-                else:
-                    ansible_dir = None
-                archive_filename = utils.archive_deploy_artifacts(
-                    self.log, parsed_args.stack, self.working_dir,
-                    ansible_dir)
-                utils.create_archive_dir()
-                utils.run_command(
-                    ['sudo', 'cp', archive_filename,
-                     constants.TRIPLEO_ARCHIVE_DIR])
-            except Exception as e:
-                self.log.error('Exception archiving deploy artifacts')
-                self.log.error(e)
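The status lifecycle threaded through take_action above (DEPLOYING up front, DEPLOY_FAILED recorded while the exception is re-raised, final status written in any case) can be distilled into a generic pattern; set_status here is a hypothetical callback standing in for deployment.set_deployment_status:

```python
def run_with_status(stack, do_work, set_status):
    # Mark the stack as deploying, flip to failed on any error while
    # letting the exception propagate, and always record the outcome.
    set_status(stack, 'DEPLOYING')
    status = 'DEPLOY_SUCCESS'
    try:
        do_work()
    except BaseException:
        status = 'DEPLOY_FAILED'
        raise
    finally:
        set_status(stack, status)
```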
-
-
-class GetDeploymentStatus(command.Command):
-    """Get deployment status"""
-
-    log = logging.getLogger(__name__ + ".GetDeploymentStatus")
-
-    def get_parser(self, prog_name):
-        parser = super(GetDeploymentStatus, self).get_parser(prog_name)
-        parser.add_argument('--plan', '--stack',
-                            help=_('Name of the stack/plan. '
-                                   '(default: overcloud)'),
-                            default='overcloud')
-        parser.add_argument(
-            '--working-dir',
-            action='store',
-            help=_('The working directory for the deployment where all '
-                   'input, output, and generated files are stored.\n'
-                   'Defaults to "$HOME/overcloud-deploy/"'))
-
-        return parser
-
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)" % parsed_args)
-        stack = parsed_args.plan
-        if not parsed_args.working_dir:
-            working_dir = utils.get_default_working_dir(stack)
-        else:
-            working_dir = parsed_args.working_dir
-
-        status = deployment.get_deployment_status(
-            self.app.client_manager,
-            stack,
-            working_dir
-        )
-
-        if not status:
-            print('No deployment was found for %s' % stack)
-            return
-
-        table = PrettyTable(
-            ['Stack Name', 'Deployment Status'])
-        table.add_row([stack, status])
-        print(table, file=self.app.stdout)
diff --git a/tripleoclient/v1/overcloud_export.py b/tripleoclient/v1/overcloud_export.py
deleted file mode 100644
index ecadc3c4d..000000000
--- a/tripleoclient/v1/overcloud_export.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime
-import logging
-import os.path
-import yaml
-
-from osc_lib.i18n import _
-
-from tripleoclient import command
-from tripleoclient import export
-from tripleoclient import utils
-
-
-class ExportOvercloud(command.Command):
-    """Export stack information used as input for another stack"""
-
-    log = logging.getLogger(__name__ + ".ExportOvercloud")
-    now = datetime.now().strftime('%Y%m%d%H%M%S')
-
-    def get_parser(self, prog_name):
-        parser = super(ExportOvercloud, self).get_parser(prog_name)
-        parser.add_argument('--stack',
-                            dest='stack',
-                            metavar='',
-                            help=_('Name of the environment main Heat stack '
-                                   'to export information from. '
-                                   '(default=overcloud)'),
-                            default='overcloud')
-        parser.add_argument('--output-file', '-o', metavar='',
-                            help=_('Name of the output file for the stack '
-                                   'data export. It will default to '
-                                   '".yaml"'))
-        parser.add_argument('--force-overwrite', '-f', action='store_true',
-                            default=False,
-                            help=_('Overwrite output file if it exists.'))
-        parser.add_argument(
-            '--working-dir',
-            action='store',
-            help=_('The working directory for the deployment where all '
-                   'input, output, and generated files are stored.\n'
-                   'Defaults to "$HOME/overcloud-deploy/"'))
-        parser.add_argument('--config-download-dir',
-                            action='store',
-                            help=_('Directory to search for config-download '
-                                   'export data. Defaults to $HOME/'
-                                   'overcloud-deploy//config-download'))
-        parser.add_argument('--no-password-excludes',
-                            action='store_true',
-                            dest='no_password_excludes',
-                            help=_('Don\'t exclude certain passwords from '
-                                   'the password export. Defaults to False, '
-                                   'meaning passwords that are not typically '
-                                   'necessary will be excluded.'))
-
-        return parser
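take_action below expands the output path and refuses to overwrite an existing export unless --force-overwrite is passed. The same guard, distilled into a standalone sketch:

```python
import os


def resolve_export_path(output_file, force_overwrite=False):
    # Expand ~ and relative segments first, then refuse to clobber an
    # existing export unless explicitly asked to.
    path = os.path.abspath(os.path.expanduser(output_file))
    if os.path.exists(path) and not force_overwrite:
        raise RuntimeError(
            "File '%s' already exists, not exporting." % path)
    return path
```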
-
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)" % parsed_args)
-
-        stack = parsed_args.stack
-        self.log.info('Running at %s with parameters %s',
-                      self.now,
-                      parsed_args)
-
-        if not parsed_args.working_dir:
-            working_dir = utils.get_default_working_dir(stack)
-        else:
-            working_dir = parsed_args.working_dir
-
-        if not parsed_args.config_download_dir:
-            config_download_dir = os.path.join(os.environ.get('HOME'),
-                                               "overcloud-deploy",
-                                               parsed_args.stack,
-                                               'config-download')
-        else:
-            config_download_dir = parsed_args.config_download_dir
-
-        output_file = parsed_args.output_file or os.path.join(
-            working_dir,
-            '{}-export.yaml'.format(
-                parsed_args.stack
-            )
-        )
-        export_file_path = os.path.abspath(
-            os.path.expanduser(
-                output_file
-            )
-        )
-        if (os.path.exists(export_file_path) and
-                not parsed_args.force_overwrite):
-            raise Exception(
-                "File '%s' already exists, not exporting." % export_file_path)
-
-        data = export.export_overcloud(
-            working_dir, stack, excludes=not parsed_args.no_password_excludes,
-            should_filter=False, config_download_dir=config_download_dir)
-        # write the exported data
-        with open(export_file_path, 'w') as f:
-            yaml.safe_dump(data, f, default_flow_style=False)
-
-        print("Stack information exported to %s." % export_file_path)
diff --git a/tripleoclient/v1/overcloud_export_ceph.py b/tripleoclient/v1/overcloud_export_ceph.py
deleted file mode 100644
index 3a4351c0a..000000000
--- a/tripleoclient/v1/overcloud_export_ceph.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime
-import logging
-import os.path
-import yaml
-
-from osc_lib.i18n import _
-from osc_lib import utils
-
-from tripleoclient import command
-from tripleoclient import export
-
-
-class ExportOvercloudCeph(command.Command):
-    """Export Ceph information used as input for another stack
-
-    Export Ceph information from one or more stacks to be used
-    as input of another stack. Creates a valid YAML file with
-    the CephExternalMultiConfig parameter populated.
-    """
-
-    log = logging.getLogger(__name__ + ".ExportOvercloudCeph")
-    now = datetime.now().strftime('%Y%m%d%H%M%S')
-
-    def get_parser(self, prog_name):
-        parser = super(ExportOvercloudCeph, self).get_parser(prog_name)
-        parser.add_argument('--stack',
-                            dest='stack',
-                            metavar='',
-                            help=_('Name of the overcloud stack(s) '
-                                   'to export Ceph information from. '
-                                   'If a comma-delimited list of stacks is '
-                                   'passed, Ceph information for all stacks '
-                                   'will be exported into a single file. '
-                                   '(default=Env: OVERCLOUD_STACK_NAME) '),
-                            default=utils.env('OVERCLOUD_STACK_NAME',
                                              default='overcloud'))
-        parser.add_argument('--cephx-key-client-name', '-k',
-                            dest='cephx',
-                            metavar='',
-                            help=_('Name of the cephx client key to export.
' - '(default=openstack)'), - default='openstack') - parser.add_argument('--output-file', '-o', metavar='', - help=_('Name of the output file for the Ceph ' - 'data export. Defaults to ' - '"ceph-export-.yaml" if one ' - 'stack is provided. Defaults to ' - '"ceph-export--stacks.yaml" ' - 'if N stacks are provided.')) - parser.add_argument('--force-overwrite', '-f', action='store_true', - default=False, - help=_('Overwrite output file if it exists.')) - parser.add_argument('--config-download-dir', - action='store', - help=_('Directory to search for config-download ' - 'export data. Defaults to $HOME/' - 'overcloud-deploy//config-download')) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - stacks = parsed_args.stack.split(',') - stack_count = len(stacks) - if stack_count == 1: - name = parsed_args.stack - else: - name = str(stack_count) + '-stacks' - output_file = parsed_args.output_file or \ - 'ceph-export-%s.yaml' % name - - self.log.info('Running at %s with parameters %s', - self.now, - parsed_args) - - if os.path.exists(output_file) and not parsed_args.force_overwrite: - raise Exception( - "File '%s' already exists, not exporting." % output_file) - - # extract ceph data for each stack into the cephs list - cephs = [] - for stack in stacks: - if not parsed_args.config_download_dir: - config_download_dir = os.path.join(os.environ.get('HOME'), - "overcloud-deploy", - stack, - 'config-download') - else: - config_download_dir = parsed_args.config_download_dir - self.log.info('Exporting Ceph data from stack %s at %s', - stack, self.now) - cephs.append(export.export_ceph(stack, - parsed_args.cephx, - config_download_dir)) - data = {} - data['parameter_defaults'] = {} - data['parameter_defaults']['CephExternalMultiConfig'] = cephs - # write the exported data - with open(output_file, 'w') as f: - yaml.safe_dump(data, f, default_flow_style=False) - - print("Ceph information from %s stack(s) exported to %s." % - (len(cephs), output_file)) diff --git a/tripleoclient/v1/overcloud_external_update.py b/tripleoclient/v1/overcloud_external_update.py deleted file mode 100644 index b91af3af4..000000000 --- a/tripleoclient/v1/overcloud_external_update.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import os - -from oslo_config import cfg -from oslo_log import log as logging - -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient.exceptions import OvercloudUpdateNotConfirmed - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils -from tripleoclient.workflows import deployment - - -CONF = cfg.CONF - - -class ExternalUpdateRun(command.Command): - """Run external minor update Ansible playbook - - This will run the external minor update Ansible playbook, - executing tasks from the undercloud. The update playbooks are - made available after completion of the 'overcloud update - prepare' command. 
- - """ - - log = logging.getLogger(__name__ + ".ExternalUpdateRun") - - def get_parser(self, prog_name): - parser = super(ExternalUpdateRun, self).get_parser(prog_name) - parser.add_argument('--static-inventory', - dest='static_inventory', - action="store", - default=None, - help=_('DEPRECATED: tripleo-ansible-inventory.yaml' - ' in working dir will be used.') - ) - parser.add_argument("--ssh-user", - dest="ssh_user", - action="store", - default="tripleo-admin", - help=_("DEPRECATED: Only tripleo-admin should be " - "used as ssh user.") - ) - parser.add_argument('--tags', - dest='tags', - action="store", - default="", - help=_('A string specifying the tag or comma ' - 'separated list of tags to be passed ' - 'as --tags to ansible-playbook. ') - ) - parser.add_argument('--skip-tags', - dest='skip_tags', - action="store", - default="", - help=_('A string specifying the tag or comma ' - 'separated list of tags to be passed ' - 'as --skip-tags to ansible-playbook. ') - ) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('-e', '--extra-vars', dest='extra_vars', - action='append', - help=('Set additional variables as key=value or ' - 'yaml/json'), - default=[]) - parser.add_argument('-y', '--yes', default=False, - action='store_true', - help=_("Use -y or --yes to skip the confirmation " - "required before any upgrade " - "operation. Use this with caution! "), - ) - parser.add_argument( - '--limit', - action='store', - default=None, - help=_("A string that identifies a single node or comma-separated" - "list of nodes the config-download Ansible playbook " - "execution will be limited to. For example: --limit" - " \"compute-0,compute-1,compute-5\".") - ) - parser.add_argument( - '--ansible-forks', - action='store', - default=None, - type=int, - help=_('The number of Ansible forks to use for the' - ' config-download ansible-playbook command.') - ) - parser.add_argument( - '--refresh', - action='store_true', - help=_('DEPRECATED: Refresh the config-download playbooks.' 
- 'Use `overcloud update prepare` instead to refresh ' - 'playbooks.') - ) - - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - oooutils.ensure_run_as_normal_user() - - if (not parsed_args.yes - and not oooutils.prompt_user_for_confirmation( - constants.UPDATE_PROMPT, self.log)): - raise OvercloudUpdateNotConfirmed(constants.UPDATE_NO) - - working_dir = oooutils.get_default_working_dir(parsed_args.stack) - config_download_dir = os.path.join(working_dir, 'config-download') - ansible_dir = os.path.join(config_download_dir, parsed_args.stack) - inventory_path = os.path.join(ansible_dir, - 'tripleo-ansible-inventory.yaml') - key = oooutils.get_key(parsed_args.stack) - playbooks = [os.path.join(ansible_dir, p) - for p in constants.EXTERNAL_UPDATE_PLAYBOOKS] - oooutils.run_ansible_playbook( - playbook=playbooks, - inventory=inventory_path, - workdir=config_download_dir, - tags=parsed_args.tags, - extra_vars=parsed_args.extra_vars, - skip_tags=parsed_args.skip_tags, - limit_hosts=oooutils.playbook_limit_parse( - limit_nodes=parsed_args.limit - ), - forks=parsed_args.ansible_forks, - key=key, - reproduce_command=True - ) - - deployment.snapshot_dir(ansible_dir) - self.log.info("Completed Overcloud External Update Run.") diff --git a/tripleoclient/v1/overcloud_external_upgrade.py b/tripleoclient/v1/overcloud_external_upgrade.py deleted file mode 100644 index fab536adb..000000000 --- a/tripleoclient/v1/overcloud_external_upgrade.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import os - -from oslo_config import cfg -from oslo_log import log as logging - -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient.exceptions import OvercloudUpgradeNotConfirmed - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils -from tripleoclient.workflows import deployment - - -CONF = cfg.CONF - - -class ExternalUpgradeRun(command.Command): - """Run external major upgrade Ansible playbook - - This will run the external major upgrade Ansible playbook, - executing tasks from the undercloud. The upgrade playbooks are - made available after completion of the 'overcloud upgrade - prepare' command. 
- - """ - - log = logging.getLogger(__name__ + ".ExternalUpgradeRun") - - def get_parser(self, prog_name): - parser = super(ExternalUpgradeRun, self).get_parser(prog_name) - parser.add_argument('--static-inventory', - dest='static_inventory', - action="store", - default=None, - help=_('DEPRECATED: tripleo-ansible-inventory.yaml' - ' in working dir will be used.') - ) - parser.add_argument("--ssh-user", - dest="ssh_user", - action="store", - default="tripleo-admin", - help=_("DEPRECATED: Only tripleo-admin should be " - "used as ssh user.") - ) - parser.add_argument('--tags', - dest='tags', - action="store", - default="", - help=_('A string specifying the tag or comma ' - 'separated list of tags to be passed ' - 'as --tags to ansible-playbook. ') - ) - parser.add_argument('--skip-tags', - dest='skip_tags', - action="store", - default="", - help=_('A string specifying the tag or comma ' - 'separated list of tags to be passed ' - 'as --skip-tags to ansible-playbook. ') - ) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('-e', '--extra-vars', dest='extra_vars', - action='append', - help=('Set additional variables as key=value or ' - 'yaml/json'), - default=[]) - parser.add_argument('-y', '--yes', default=False, - action='store_true', - help=_("Use -y or --yes to skip the confirmation " - "required before any upgrade " - "operation. Use this with caution! "), - ) - - parser.add_argument( - '--limit', - action='store', - default=None, - help=_("A string that identifies a single node or comma-separated" - "list of nodes the config-download Ansible playbook " - "execution will be limited to. 
For example: --limit" - " \"compute-0,compute-1,compute-5\".") - ) - parser.add_argument( - '--ansible-forks', - action='store', - default=None, - type=int, - help=_('The number of Ansible forks to use for the' - ' config-download ansible-playbook command.') - ) - - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - oooutils.ensure_run_as_normal_user() - - if (not parsed_args.yes - and not oooutils.prompt_user_for_confirmation( - constants.UPGRADE_PROMPT, self.log)): - raise OvercloudUpgradeNotConfirmed(constants.UPGRADE_NO) - - working_dir = oooutils.get_default_working_dir(parsed_args.stack) - config_download_dir = os.path.join(working_dir, 'config-download') - ansible_dir = os.path.join(config_download_dir, parsed_args.stack) - inventory_path = os.path.join(ansible_dir, - 'tripleo-ansible-inventory.yaml') - key = oooutils.get_key(parsed_args.stack) - playbooks = [os.path.join(ansible_dir, p) - for p in constants.EXTERNAL_UPGRADE_PLAYBOOKS] - oooutils.run_ansible_playbook( - playbook=playbooks, - inventory=inventory_path, - workdir=config_download_dir, - tags=parsed_args.tags, - skip_tags=parsed_args.skip_tags, - extra_vars=parsed_args.extra_vars, - limit_hosts=oooutils.playbook_limit_parse( - limit_nodes=parsed_args.limit - ), - forks=parsed_args.ansible_forks, - key=key, - reproduce_command=True - ) - - deployment.snapshot_dir(ansible_dir) - self.log.info("Completed Overcloud External Upgrade Run.") diff --git a/tripleoclient/v1/overcloud_image.py b/tripleoclient/v1/overcloud_image.py deleted file mode 100644 index 1e88e7616..000000000 --- a/tripleoclient/v1/overcloud_image.py +++ /dev/null @@ -1,693 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import abc -import collections -from datetime import datetime -import logging -import os -import re -import subprocess -import sys - -from openstackclient.common.progressbar import VerboseFileWrapper -from keystoneauth1.exceptions import catalog as exc_catalog -from osc_lib import exceptions -from osc_lib.i18n import _ -from prettytable import PrettyTable -import tripleo_common.arch -from tripleo_common.image import build - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as plugin_utils - - -class BuildOvercloudImage(command.Command): - """Build images for the overcloud""" - - auth_required = False - log = logging.getLogger(__name__ + ".BuildOvercloudImage") - - IMAGE_YAML_PATH = "/usr/share/openstack-tripleo-common/image-yaml" - DEFAULT_YAML = ['overcloud-images-python3.yaml', - 'overcloud-images-centos9.yaml'] - REQUIRED_PACKAGES = [ - 'openstack-tripleo-common', - 'openstack-ironic-python-agent-builder', - 'openstack-tripleo-image-elements', - 'openstack-tripleo-puppet-elements', - 'xfsprogs' - ] - - def get_parser(self, prog_name): - parser = super(BuildOvercloudImage, self).get_parser(prog_name) - parser.add_argument( - "--config-file", - dest="config_files", - metavar='', - default=[], - action="append", - help=_("YAML config file specifying the image build. May be " - "specified multiple times. Order is preserved, and later " - "files will override some options in previous files. " - "Other options will append."), - ) - parser.add_argument( - "--image-name", - dest="image_names", - metavar='', - default=None, - help=_("Name of image to build. May be specified multiple " - "times. If unspecified, will build all images in " - "given YAML files."), - ) - parser.add_argument( - "--no-skip", - dest="skip", - action="store_false", - default=True, - help=_("Skip build if cached image exists."), - ) - parser.add_argument( - "--no-package-install", - dest="package_install", - action="store_false", - default=True, - help=_("Skip installing required packages."), - ) - parser.add_argument( - "--output-directory", - dest="output_directory", - default=os.environ.get('TRIPLEO_ROOT', '.'), - help=_("Output directory for images. Defaults to $TRIPLEO_ROOT," - "or current directory if unset."), - ) - parser.add_argument( - "--temp-dir", - dest="temp_dir", - default=os.environ.get('TMPDIR', os.getcwd()), - help=_("Temporary directory to use when building the images. 
" - "Defaults to $TMPDIR or current directory if unset."), - ) - return parser - - def _ensure_packages_installed(self): - cmd = ['sudo', 'dnf', 'install', '-y'] + self.REQUIRED_PACKAGES - output = plugin_utils.run_command(cmd, - name="Install required packages") - self.log.info(output) - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if parsed_args.package_install: - self._ensure_packages_installed() - - if not parsed_args.config_files: - parsed_args.config_files = [os.path.join(self.IMAGE_YAML_PATH, f) - for f in self.DEFAULT_YAML] - os.environ.update({'TMPDIR': parsed_args.temp_dir}) - manager = build.ImageBuildManager( - parsed_args.config_files, - output_directory=parsed_args.output_directory, - skip=parsed_args.skip, - images=parsed_args.image_names) - manager.build() - - -class BaseClientAdapter(object): - - log = logging.getLogger(__name__ + ".BaseClientAdapter") - - def __init__(self, image_path, progress=False, - update_existing=False, updated=None): - self.progress = progress - self.image_path = image_path - self.update_existing = update_existing - self.updated = updated - - @abc.abstractmethod - def get_image_property(self, image, prop): - pass - - @abc.abstractmethod - def update_or_upload(self, image_name, properties, names_func, - arch, platform=None, - disk_format='raw', container_format='bare'): - pass - - def _copy_file(self, src, dest): - self._make_dirs(path=os.path.dirname(dest)) - cmd = 'sudo cp -f "{0}" "{1}"'.format(src, dest) - self.log.debug(cmd) - subprocess.check_call(cmd, shell=True) - - def _move_file(self, src, dest): - cmd = 'sudo mv "{0}" "{1}"'.format(src, dest) - self.log.debug(cmd) - subprocess.check_call(cmd, shell=True) - - def _convert_image(self, src, dest): - cmd = 'sudo qemu-img convert -O raw "{0}" "{1}"'.format(src, dest) - self.log.debug(cmd) - subprocess.check_call(cmd, shell=True) - - def _make_dirs(self, path): - cmd = 'sudo mkdir -m 0775 -p "{0}"'.format(path) - self.log.debug(cmd) - subprocess.check_call(cmd, shell=True) - - def _files_changed(self, filepath1, filepath2): - return (plugin_utils.file_checksum(filepath1) != - plugin_utils.file_checksum(filepath2)) - - def file_create_or_update(self, src_file, dest_file): - if os.path.isfile(dest_file): - if self._files_changed(src_file, dest_file): - if self.update_existing: - self._copy_file(src_file, dest_file) - else: - print('Image file "%s" already exists and can be updated' - ' with --update-existing.' % dest_file) - else: - print('Image file "%s" is up-to-date, skipping.' % dest_file) - else: - self._copy_file(src_file, dest_file) - - def check_file_exists(self, file_path): - if not os.path.isfile(file_path): - raise exceptions.CommandError( - 'Required file "%s" does not exist.' 
% file_path - ) - - def read_image_file_pointer(self, filepath): - self.check_file_exists(filepath) - file_descriptor = open(filepath, 'rb') - - if self.progress: - file_descriptor = VerboseFileWrapper(file_descriptor) - - return file_descriptor - - -class FileImageClientAdapter(BaseClientAdapter): - - def __init__(self, local_path, **kwargs): - super(FileImageClientAdapter, self).__init__(**kwargs) - self.local_path = local_path - - def get_image_property(self, image, prop): - if prop == 'kernel_id': - path = os.path.splitext(image.id)[0] + '.vmlinuz' - if os.path.exists(path.replace("file://", "")): - return path - return None - if prop == 'ramdisk_id': - path = os.path.splitext(image.id)[0] + '.initrd' - if os.path.exists(path.replace("file://", "")): - return path - return None - raise ValueError('Unsupported property %s' % prop) - - def _print_image_info(self, image): - table = PrettyTable(['Path', 'Name', 'Size']) - table.add_row([image.id, image.name, image.size]) - print(table, file=sys.stdout) - - def _paths(self, image_name, names_func, arch, platform): - (arch_path, extension) = names_func( - image_name, arch=arch, platform=platform, use_subdir=True) - image_file = image_name + extension - - dest_dir = os.path.split( - os.path.join(self.local_path, arch_path))[0] - return (dest_dir, image_file) - - def _get_image(self, path): - if not os.path.exists(path): - return - stat = os.stat(path) - created_at = datetime.fromtimestamp( - stat.st_mtime).isoformat() - - Image = collections.namedtuple( - 'Image', - 'id, name, checksum, created_at, size' - ) - (dir_path, filename) = os.path.split(path) - (name, extension) = os.path.splitext(filename) - checksum = plugin_utils.file_checksum(path) - - return Image( - id='file://%s' % path, - name=name, - checksum=checksum, - created_at=created_at, - size=stat.st_size - ) - - def _image_changed(self, image, filename): - return image.checksum != plugin_utils.file_checksum(filename) - - def _image_try_update(self, src_path, dest_path): - image = self._get_image(dest_path) - if image: - if self._image_changed(image, src_path): - if self.update_existing: - dest_base, dest_ext = os.path.splitext(dest_path) - dest_datestamp = re.sub( - r'[\-:\.]|(0+$)', '', image.created_at) - dest_mv = dest_base + '_' + dest_datestamp + dest_ext - self._move_file(dest_path, dest_mv) - - if self.updated is not None: - self.updated.append(dest_path) - return None - print('Image "%s" already exists and can be updated' - ' with --update-existing.' % dest_path) - return image - print('Image "%s" is up-to-date, skipping.' % dest_path) - return image - return None - - def _upload_image(self, src_path, dest_path): - dest_dir = os.path.split(dest_path)[0] - if not os.path.isdir(dest_dir): - self._make_dirs(dest_dir) - - self._copy_file(src_path, dest_path) - image = self._get_image(dest_path) - print('Image "%s" was copied.' 
% image.id, file=sys.stdout)
-        self._print_image_info(image)
-        return image
-
-    def update_or_upload(self, image_name, properties, names_func,
-                         arch, platform=None,
-                         disk_format='raw', container_format='bare'):
-        (dest_dir, image_file) = self._paths(
-            image_name, names_func, arch, platform)
-
-        src_path = os.path.join(self.image_path, image_file)
-        dest_path = os.path.join(dest_dir, image_file)
-        existing_image = self._image_try_update(src_path, dest_path)
-        if existing_image:
-            return existing_image
-
-        return self._upload_image(src_path, dest_path)
-
-
-class GlanceClientAdapter(BaseClientAdapter):
-
-    def __init__(self, client, **kwargs):
-        super(GlanceClientAdapter, self).__init__(**kwargs)
-        self.client = client
-
-    def _print_image_info(self, image):
-        table = PrettyTable(['ID', 'Name', 'Disk Format', 'Size', 'Status'])
-        table.add_row([image.id, image.name, image.disk_format, image.size,
-                       image.status])
-        print(table, file=sys.stdout)
-
-    def _get_image(self, name):
-        """Retrieves 'openstack.image.v2.image.Image' object.
-        Uses 'openstack.image.v2._proxy.Proxy.find_image' method.
-
-        :param name: name or ID of an image
-        :type name: `string`
-
-        :returns: Requested image object if one exists, otherwise `None`
-        :rtype: `openstack.image.v2.image.Image` or `NoneType`
-        """
-        # This would return None by default for a non-existent resource,
-        # and a DuplicateResource exception if there is more than one.
-        return self.client.find_image(name)
-
-    def _image_changed(self, image, filename):
-        """Compare the precomputed hash value with the one derived here.
-
-        :param image: Image resource
-        :type image: `openstack.image.v2.image.Image`
-        :param filename: path to the image file
-        :type filename: `string`
-
-        :returns: True if image hashes don't match, False otherwise
-        :rtype: `bool`
-        """
-        if not hasattr(image, 'hash_value'):
-            raise RuntimeError(
-                "The supplied image does not have a hash value set.")
-        return image.hash_value != plugin_utils.file_checksum(
-            filename, image.hash_algo)
-
-    def _image_try_update(self, image_name, image_file):
-        image = self._get_image(image_name)
-        if image:
-            if self._image_changed(image, image_file):
-                if self.update_existing:
-                    self.client.update_image(
-                        image.id,
-                        name='%s_%s' % (image.name, re.sub(r'[\-:\.]|(0+$)',
-                                                           '',
-                                                           image.created_at))
-                    )
-                    if self.updated is not None:
-                        self.updated.append(image.id)
-                    return None
-                print('Image "%s" already exists and can be updated'
-                      ' with --update-existing.' % image_name)
-                return image
-            print('Image "%s" is up-to-date, skipping.' % image_name)
-            return image
-        return None
-
-    def _upload_image(self, name, data, properties=None, visibility='public',
-                      disk_format='raw', container_format='bare'):
-
-        image = self.client.create_image(
-            name=name,
-            visibility=visibility,
-            disk_format=disk_format,
-            container_format=container_format,
-            data=data,
-            validate_checksum=False
-        )
-
-        if properties:
-            self.client.update_image(image.id, **properties)
-            # Refresh image info
-            image = self.client.get_image(image.id)
-
-        print('Image "%s" was uploaded.'
% image.name, file=sys.stdout) - self._print_image_info(image) - return image - - def get_image_property(self, image, prop): - return getattr(image, prop) - - def update_or_upload(self, image_name, properties, names_func, - arch, platform=None, - disk_format='raw', container_format='bare'): - - if arch == 'x86_64' and platform is None: - arch = None - - (glance_name, extension) = names_func( - image_name, arch=arch, platform=platform) - - file_path = os.path.join(self.image_path, image_name + extension) - - updated_image = self._image_try_update(glance_name, file_path) - if updated_image: - return updated_image - - with self.read_image_file_pointer(file_path) as data: - return self._upload_image( - name=glance_name, - disk_format=disk_format, - container_format=container_format, - properties=properties, - data=data) - - -class UploadOvercloudImage(command.Command): - """Make existing image files available for overcloud deployment.""" - log = logging.getLogger(__name__ + ".UploadOvercloudImage") - - def _get_client_adapter(self, parsed_args): - kwargs = { - 'progress': parsed_args.progress, - 'image_path': parsed_args.image_path, - 'update_existing': parsed_args.update_existing, - 'updated': self.updated - } - if not parsed_args.local: - try: - return GlanceClientAdapter(self.app.client_manager.image, - **kwargs) - except exc_catalog.EndpointNotFound: - # Missing endpoint implies local-only upload - pass - - return FileImageClientAdapter(parsed_args.local_path, **kwargs) - - def _get_environment_var(self, envvar, default, deprecated=[]): - for env_key in deprecated: - if env_key in os.environ: - self.log.warning(('Found deprecated environment var \'%s\', ' - 'please use \'%s\' instead' % (env_key, - envvar))) - return os.environ.get(env_key) - return os.environ.get(envvar, default) - - def _get_image_filename(self, parsed_args): - if parsed_args.os_image_name: - return parsed_args.os_image_name - if os.path.exists(os.path.join(parsed_args.image_path, - constants.DEFAULT_WHOLE_DISK_IMAGE)): - return constants.DEFAULT_WHOLE_DISK_IMAGE - return constants.DEFAULT_PARTITION_IMAGE - - def get_parser(self, prog_name): - parser = super(UploadOvercloudImage, self).get_parser(prog_name) - parser.add_argument( - "--image-path", - default=self._get_environment_var('IMAGE_PATH', './'), - help=_("Path to directory containing image files"), - ) - parser.add_argument( - "--os-image-name", - default=self._get_environment_var('OS_IMAGE_NAME', None), - help=_("OpenStack disk image filename"), - ) - parser.add_argument( - "--ironic-python-agent-name", - dest='ipa_name', - default=self._get_environment_var('IRONIC_PYTHON_AGENT_NAME', - 'ironic-python-agent', - deprecated=['AGENT_NAME']), - help=_("OpenStack ironic-python-agent (agent) image filename"), - ) - parser.add_argument( - "--http-boot", - default=self._get_environment_var( - 'HTTP_BOOT', - constants.IRONIC_HTTP_BOOT_BIND_MOUNT), - help=_("Root directory for the ironic-python-agent image. 
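One detail of _get_environment_var above is worth calling out: deprecated variable names take precedence, so existing automation keeps working, but a warning nudges users toward the new name. Extracted as a standalone function for illustration:

import logging
import os

log = logging.getLogger(__name__)

def get_environment_var(envvar, default, deprecated=()):
    # Deprecated names win so old scripts keep their behavior.
    for env_key in deprecated:
        if env_key in os.environ:
            log.warning("Found deprecated environment var '%s', "
                        "please use '%s' instead", env_key, envvar)
            return os.environ[env_key]
    return os.environ.get(envvar, default)

# e.g. get_environment_var('IRONIC_PYTHON_AGENT_NAME',
#                          'ironic-python-agent', deprecated=['AGENT_NAME'])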
If " - "uploading images for multiple architectures/platforms, " - "vary this argument such that a distinct folder is " - "created for each architecture/platform.") - ) - parser.add_argument( - "--update-existing", - dest="update_existing", - action="store_true", - help=_("Update images if they already exist"), - ) - parser.add_argument( - "--whole-disk", - dest="whole_disk", - action="store_true", - default=False, - help=_("When set, the overcloud-full image to be uploaded " - "will be considered a whole-disk image"), - ) - parser.add_argument( - "--architecture", - help=_("Architecture type for these images, " - "\'x86_64\', \'i386\' and \'ppc64le\' " - "are common options. This option should match at least " - "one \'arch\' value in instackenv.json"), - ) - parser.add_argument( - "--platform", - help=_("Platform type for these images. Platform is a " - "sub-category of architecture. For example you may have " - "generic images for x86_64 but offer images specific to " - "SandyBridge (SNB)."), - ) - parser.add_argument( - "--image-type", - dest="image_type", - choices=["os", "ironic-python-agent"], - help=_("If specified, restricts the image type to upload " - "(os for the overcloud image or ironic-python-agent for " - "the ironic-python-agent one)"), - ) - parser.add_argument( - "--progress", - dest="progress", - action="store_true", - default=False, - help=_('Show a progress bar for the file upload action')) - parser.add_argument( - "--local", - dest="local", - action="store_true", - default=True, - help=_('DEPRECATED: Copy files locally, even if there is an image ' - 'service endpoint. The default has been changed to copy ' - 'files locally.')) - parser.add_argument( - "--no-local", - dest="local", - action="store_false", - default=True, - help=_('Upload files to image service.')) - parser.add_argument( - "--local-path", - default=self._get_environment_var( - 'LOCAL_IMAGE_PATH', - constants.IRONIC_LOCAL_IMAGE_PATH), - help=_("Root directory for image file copy destination when there " - "is no image endpoint, or when --local is specified") - ) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - self.updated = [] - self.adapter = self._get_client_adapter(parsed_args) - - if parsed_args.platform and not parsed_args.architecture: - raise exceptions.CommandError('You supplied a platform (%s) ' - 'without specifying the ' - 'architecture' % parsed_args.platform) - - self.log.debug("checking if image files exist") - - image_files = [] - image_filename = self._get_image_filename(parsed_args) - image_name = os.path.splitext(image_filename)[0] - - if image_filename == constants.DEFAULT_WHOLE_DISK_IMAGE: - parsed_args.whole_disk = True - - if parsed_args.image_type is None or \ - parsed_args.image_type == 'ironic-python-agent': - image_files.append('%s.initramfs' % parsed_args.ipa_name) - image_files.append('%s.kernel' % parsed_args.ipa_name) - - if parsed_args.image_type is None or parsed_args.image_type == 'os': - image_files.append(image_filename) - - if parsed_args.whole_disk: - overcloud_image_type = 'whole disk' - else: - overcloud_image_type = 'partition' - - for image in image_files: - extension = image.split('.')[-1] - image_path = os.path.join(parsed_args.image_path, image) - self.adapter.check_file_exists(image_path) - # Convert qcow2 image to raw, see bug/1893912 - if extension == 'qcow2': - self.adapter._convert_image( - image_path, - os.path.join(parsed_args.image_path, image_name + '.raw')) - - self.log.debug("uploading %s overcloud images " % -
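The qcow2-to-raw conversion above (see bug/1893912) is delegated to the adapter's _convert_image helper, which is defined elsewhere in this module. A plausible stand-in built on the qemu-img CLI would look like this; it is an assumption for illustration, not the verbatim implementation:

import subprocess

def convert_image(src_qcow2, dest_raw):
    # qemu-img reads the qcow2 source (-f) and writes a raw image (-O).
    subprocess.run(
        ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
         src_qcow2, dest_raw],
        check=True)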
overcloud_image_type) - - properties = {} - arch = parsed_args.architecture - if arch: - properties['hw_architecture'] = arch - else: - properties['hw_architecture'] = tripleo_common.arch.kernel_arch() - platform = parsed_args.platform - if platform: - properties['tripleo_platform'] = platform - - if parsed_args.image_type is None or parsed_args.image_type == 'os': - # vmlinuz and initrd only need to be uploaded for a partition image - if not parsed_args.whole_disk: - kernel = self.adapter.update_or_upload( - image_name=image_name, - properties=properties, - names_func=plugin_utils.overcloud_kernel, - arch=arch, - platform=platform, - disk_format='aki' - ) - - ramdisk = self.adapter.update_or_upload( - image_name=image_name, - properties=properties, - names_func=plugin_utils.overcloud_ramdisk, - arch=arch, - platform=platform, - disk_format='ari' - ) - - overcloud_image = self.adapter.update_or_upload( - image_name=image_name, - properties=dict( - {'kernel_id': kernel.id, - 'ramdisk_id': ramdisk.id}, - **properties), - names_func=plugin_utils.overcloud_image, - arch=arch, - platform=platform - ) - - img_kernel_id = self.adapter.get_image_property( - overcloud_image, 'kernel_id') - img_ramdisk_id = self.adapter.get_image_property( - overcloud_image, 'ramdisk_id') - # check overcloud image links - if img_kernel_id is None or img_ramdisk_id is None: - self.log.error('Link of overcloud image %s to its initrd' - ' or kernel images is MISSING.' - 'You can keep it or fix it manually.' % - overcloud_image.name) - elif (img_kernel_id != kernel.id or - img_ramdisk_id != ramdisk.id): - self.log.error('Link of overcloud image %s to its initrd' - ' or kernel images leads to OLD image.' - 'You can keep it or fix it manually.' % - overcloud_image.name) - - else: - overcloud_image = self.adapter.update_or_upload( - image_name=image_name, - properties=properties, - names_func=plugin_utils.overcloud_image, - arch=arch, - platform=platform - ) - - self.log.debug("uploading bm images") - - if parsed_args.image_type is None or \ - parsed_args.image_type == 'ironic-python-agent': - self.log.debug("copy agent images to HTTP BOOT dir") - - self.adapter.file_create_or_update( - os.path.join(parsed_args.image_path, - '%s.kernel' % parsed_args.ipa_name), - os.path.join(parsed_args.http_boot, 'agent.kernel') - ) - - self.adapter.file_create_or_update( - os.path.join(parsed_args.image_path, - '%s.initramfs' % parsed_args.ipa_name), - os.path.join(parsed_args.http_boot, 'agent.ramdisk') - ) - - if self.updated: - print('%s images have been updated, make sure to ' - 'rerun\n\topenstack overcloud node configure\nto reflect ' - 'the changes on the nodes' % len(self.updated)) diff --git a/tripleoclient/v1/overcloud_netenv_validate.py b/tripleoclient/v1/overcloud_netenv_validate.py deleted file mode 100644 index 47018f53b..000000000 --- a/tripleoclient/v1/overcloud_netenv_validate.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import itertools -import logging -import os - -import ipaddress -from osc_lib.i18n import _ -import yaml - -from tripleoclient import command - - -class ValidateOvercloudNetenv(command.Command): - """Validate the network environment file.""" - - auth_required = False - log = logging.getLogger(__name__ + ".ValidateOvercloudNetworkEnvironment") - - def get_parser(self, prog_name): - parser = super(ValidateOvercloudNetenv, self).get_parser(prog_name) - parser.add_argument( - '-f', '--file', dest='netenv', - help=_("Path to the network environment file"), - default='network-environment.yaml') - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - with open(parsed_args.netenv, 'r') as net_file: - network_data = yaml.safe_load(net_file) - - cidrinfo = {} - poolsinfo = {} - vlaninfo = {} - - self.error_count = 0 - - for item in network_data['resource_registry']: - if item.endswith("Net::SoftwareConfig"): - data = network_data['resource_registry'][item] - self.log.info('Validating %s', data) - data_path = os.path.join(os.path.dirname(parsed_args.netenv), - data) - self.NIC_validate(item, data_path) - - for item in network_data['parameter_defaults']: - data = network_data['parameter_defaults'][item] - - if item.endswith('NetCidr'): - cidrinfo[item] = data - elif item.endswith('AllocationPools'): - poolsinfo[item] = data - elif item.endswith('NetworkVlanID'): - vlaninfo[item] = data - elif item == 'ExternalInterfaceDefaultRoute': - pass - elif item == 'BondInterfaceOvsOptions': - pass - - self.check_cidr_overlap(cidrinfo.values()) - self.check_allocation_pools_pairing(network_data['parameter_defaults'], - poolsinfo) - self.check_vlan_ids(vlaninfo) - - if self.error_count > 0: - print('\nFAILED Validation with %i error(s)' % self.error_count) - else: - print('SUCCESSFUL Validation with %i error(s)' % self.error_count) - - def check_cidr_overlap(self, networks): - objs = [] - for x in networks: - try: - objs += [ipaddress.ip_network(str(x))] - except ValueError: - self.log.error('Invalid address: %s', x) - self.error_count += 1 - - for net1, net2 in itertools.combinations(objs, 2): - if (net1.overlaps(net2)): - self.log.error( - 'Overlapping networks detected {} {}'.format(net1, net2)) - self.error_count += 1 - - def check_allocation_pools_pairing(self, filedata, pools): - for poolitem in pools: - pooldata = filedata[poolitem] - - self.log.info('Checking allocation pool {}'.format(poolitem)) - - pool_objs = [] - for pool in pooldata: - try: - ip_start = ipaddress.ip_address( - str(pool['start'])) - except ValueError: - self.log.error('Invalid address: %s' % pool['start']) - self.error_count += 1 - ip_start = None - try: - ip_end = ipaddress.ip_address(str(pool['end'])) - except ValueError: - self.log.error('Invalid address: %s' % pool['end']) - self.error_count += 1 - ip_end = None - if (ip_start is None) or (ip_end is None): - continue - try: - pool_objs.append(list( - ipaddress.summarize_address_range(ip_start, ip_end))) - except (TypeError, ValueError) as ex: - self.log.error( - ( - 'Encountered exception `{}` while working with\n' - 'the address pool: {}, {}').format( - ex, - ip_start, - ip_end)) - - self.error_count += 1 - - subnet_item = poolitem.split('AllocationPools')[0] + 'NetCidr' - try: - subnet_obj = ipaddress.ip_network( - str(filedata[subnet_item])) - except ValueError: - self.log.error('Invalid address: %s', subnet_item) - self.error_count += 1 - continue - - for ranges in pool_objs: - for range in ranges: - if not
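The three checks above lean entirely on the stdlib ipaddress module: ip_network for CIDR parsing and overlap detection, and summarize_address_range to turn a start/end pool into a list of networks. A quick demonstration (addresses are illustrative):

import ipaddress

net1 = ipaddress.ip_network('192.0.2.0/24')
net2 = ipaddress.ip_network('192.0.2.128/25')
print(net1.overlaps(net2))  # True: these CIDRs collide

start = ipaddress.ip_address('192.0.2.10')
end = ipaddress.ip_address('192.0.2.20')
# The start..end range is summarized into a handful of networks,
# which the validator then tests against the parent subnet.
print(list(ipaddress.summarize_address_range(start, end)))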
subnet_obj.overlaps(range): - self.log.error( - 'Allocation pool {} {} outside of subnet {}: {}' - .format(poolitem, pooldata, subnet_item, - subnet_obj)) - self.error_count += 1 - break - - def check_vlan_ids(self, vlans): - invertdict = {} - for k, v in vlans.items(): - self.log.info('Checking Vlan ID {}'.format(k)) - if v not in invertdict: - invertdict[v] = k - else: - self.log.error('Vlan ID {} ({}) already exists in {}'.format( - v, k, invertdict[v])) - self.error_count += 1 - - def NIC_validate(self, resource, path): - try: - with open(path, 'r') as nic_file: - nic_data = yaml.safe_load(nic_file) - except (IOError, OSError): - self.log.error( - 'The resource "%s" reference file does not exist: "%s"', - resource, path) - self.error_count += 1 - return - - # Look through every resource's bridges and make sure there is only a - # single bond per bridge and only 1 interface per bridge if there are - # no bonds. - for item in nic_data['resources']: - bridges = nic_data['resources'][item]['properties']['config'][ - 'str_replace']['params']['$network_config']['network_config'] - for bridge in bridges: - if bridge['type'] == 'ovs_bridge': - bond_count = 0 - interface_count = 0 - for bond in bridge['members']: - if bond['type'] == 'ovs_bond': - bond_count += 1 - if bond['type'] == 'interface': - interface_count += 1 - if bond_count == 0: - # Logging could be better if we knew the bridge name. - # Since it's passed as a parameter we would need to - # catch it - self.log.debug( - 'There are 0 bonds for bridge %s of ' - 'resource %s in %s', - bridge['name'], item, path) - if bond_count == 1: - self.log.debug( - 'There is 1 bond for bridge %s of ' - 'resource %s in %s', - bridge['name'], item, path) - if bond_count >= 2: - self.log.error( - 'Invalid bonding: There are %d bonds for bridge %s ' - 'of resource %s in %s', - bond_count, bridge['name'], item, path) - self.error_count += 1 - if bond_count == 0 and interface_count > 1: - self.log.error( - 'Invalid interface: When not using a bond, there ' - 'can only be 1 interface for bridge %s of resource ' - '%s in %s', - bridge['name'], item, path) - self.error_count += 1 diff --git a/tripleoclient/v1/overcloud_node.py b/tripleoclient/v1/overcloud_node.py deleted file mode 100644 index 2c0cb3831..000000000 --- a/tripleoclient/v1/overcloud_node.py +++ /dev/null @@ -1,840 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
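NIC_validate digs through a fairly deep Heat structure before it reaches the bridge list. A trimmed example of a document that would pass the single-bond rule; the nesting mirrors what the validator unwraps, while the resource and interface names are illustrative only:

nic_data = {
    'resources': {
        'OsNetConfigImpl': {
            'properties': {
                'config': {
                    'str_replace': {
                        'params': {
                            '$network_config': {
                                'network_config': [{
                                    'type': 'ovs_bridge',
                                    'name': 'br-ex',
                                    'members': [
                                        # One bond, so extra interfaces
                                        # are allowed on this bridge.
                                        {'type': 'ovs_bond',
                                         'name': 'bond1'},
                                        {'type': 'interface',
                                         'name': 'nic2'},
                                        {'type': 'interface',
                                         'name': 'nic3'},
                                    ],
                                }],
                            },
                        },
                    },
                },
            },
        },
    },
}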
-# - -import collections -import copy -import datetime -from io import StringIO -import ipaddress -import json -import logging -import os -import sys -import time - -from cliff.formatters import table -from openstack import exceptions as openstack_exc -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from osc_lib import utils -import tempfile -import yaml - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import utils as oooutils -from tripleoclient.workflows import baremetal -from tripleoclient.workflows import tripleo_baremetal as tb - - -class DeleteNode(command.Command): - """Delete overcloud nodes.""" - - log = logging.getLogger(__name__ + ".DeleteNode") - - def get_parser(self, prog_name): - parser = super(DeleteNode, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('nodes', metavar='', nargs="*", - default=[], - help=_('Node ID(s) to delete (otherwise specified ' - 'in the --baremetal-deployment file)')) - group.add_argument('-b', '--baremetal-deployment', - metavar='', - help=_('Configuration file describing the ' - 'baremetal deployment')) - - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack to scale ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - - parser.add_argument( - '--timeout', metavar='', - type=int, default=constants.STACK_TIMEOUT, dest='timeout', - help=_("Timeout in minutes to wait for the nodes to be deleted. " - "Keep in mind that due to keystone session duration " - "that timeout has an upper bound of 4 hours ") - ) - parser.add_argument( - '--overcloud-ssh-port-timeout', - help=_('Timeout for the ssh port to become active.'), - type=int, - default=constants.ENABLE_SSH_ADMIN_SSH_PORT_TIMEOUT - ) - parser.add_argument('-y', '--yes', - help=_('Skip yes/no prompt (assume yes).'), - default=False, - action="store_true") - return parser - - def _nodes_to_delete(self, parsed_args, roles): - with oooutils.TempDirs() as tmp: - unprovision_confirm = os.path.join( - tmp, 'unprovision_confirm.json') - - oooutils.run_ansible_playbook( - playbook='cli-overcloud-node-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - timeout=parsed_args.timeout, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "prompt": True, - "unprovision_confirm": unprovision_confirm, - } - ) - with open(unprovision_confirm) as f: - to_unprovision = json.load(f) - if isinstance(to_unprovision, dict): - nodes = to_unprovision.get( - 'instances') + to_unprovision.get('pre_provisioned') - else: - nodes = to_unprovision - if not nodes: - print('No nodes to unprovision') - return None, None - TableArgs = collections.namedtuple( - 'TableArgs', 'print_empty max_width fit_width') - args = TableArgs(print_empty=True, max_width=-1, fit_width=True) - nodes_data = [(i.get('hostname', ''), - i.get('name', ''), - i.get('id', '')) for i in nodes] - - node_hostnames = [i['hostname'] for i in nodes if 'hostname' in i] - - formatter = table.TableFormatter() - output = StringIO() - formatter.emit_list( - column_names=['hostname', 'name', 'id'], - data=nodes_data, - stdout=output, - parsed_args=args - ) - return output.getvalue(), node_hostnames - - def _check_skiplist_exists(self, env): - skiplist = 
env.get('parameter_defaults', - {}).get('DeploymentServerBlacklist') - if skiplist: - self.log.warning(_('[WARNING] DeploymentServerBlacklist is ' - 'ignored when executing scale down actions. If ' - 'the node(s) being removed should *NOT* have ' - 'any actions executed on them, please shut ' - 'them off prior to their removal.')) - - def _check_timeout(self, start, timeout): - used = int((time.time() - start) // 60) - remaining = timeout - used - if remaining <= 0: - raise exceptions.DeploymentError( - 'Deployment timed out after %sm' % used - ) - return remaining - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - # Start our timer. This will be used to calculate the timeout. - start = time.time() - - if parsed_args.baremetal_deployment: - with open(parsed_args.baremetal_deployment, 'r') as fp: - roles = yaml.safe_load(fp) - - nodes_text, nodes = self._nodes_to_delete(parsed_args, roles) - if nodes_text: - print(nodes_text) - else: - return - else: - nodes = parsed_args.nodes - nodes_text = '\n'.join('- %s' % node for node in nodes) - if not parsed_args.yes: - confirm = oooutils.prompt_user_for_confirmation( - message=_("Are you sure you want to delete these overcloud " - "nodes [y/N]? "), - logger=self.log) - if not confirm: - raise oscexc.CommandError("Action not confirmed, exiting.") - - ansible_dir = os.path.join(oooutils.get_default_working_dir( - parsed_args.stack - ), - 'config-download', - parsed_args.stack) - - inventory = os.path.join(ansible_dir, - 'tripleo-ansible-inventory.yaml') - - ansible_cfg = os.path.join(ansible_dir, 'ansible.cfg') - key_file = oooutils.get_key(parsed_args.stack) - - remaining = self._check_timeout(start, parsed_args.timeout) - - oooutils.run_ansible_playbook( - playbook='scale_playbook.yaml', - inventory=inventory, - workdir=ansible_dir, - playbook_dir=ansible_dir, - ansible_cfg=ansible_cfg, - ssh_user='tripleo-admin', - limit_hosts=':'.join('%s' % node for node in nodes), - reproduce_command=True, - ignore_unreachable=True, - timeout=remaining, - extra_env_variables={ - "ANSIBLE_BECOME": True, - "ANSIBLE_PRIVATE_KEY_FILE": key_file - } - ) - - remaining = self._check_timeout(start, parsed_args.timeout) - - if parsed_args.baremetal_deployment: - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-node-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - timeout=remaining, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "prompt": False, - "manage_network_ports": True, - } - ) - - -class ProvideNode(command.Command): - """Mark nodes as available based on UUIDs or current 'manageable' state.""" - - log = logging.getLogger(__name__ + ".ProvideNode") - - def get_parser(self, prog_name): - parser = super(ProvideNode, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('node_uuids', - nargs="*", - metavar="", - default=[], - help=_('Baremetal Node UUIDs for the node(s) to be ' - 'provided')) - group.add_argument("--all-manageable", - action='store_true', - help=_("Provide all nodes currently in 'manageable'" - " state")) - group.add_argument("--verbosity", - type=int, - default=1, - help=_("Print debug output during execution")) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - provide = 
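The scale-down flow threads one wall-clock budget through several playbook runs, and _check_timeout above is the bookkeeping that makes that work. The same arithmetic as a standalone function (using a generic RuntimeError in place of the client's DeploymentError):

import time

def remaining_minutes(start, timeout_minutes):
    # start is a time.time() value captured when take_action began.
    used = int((time.time() - start) // 60)
    remaining = timeout_minutes - used
    if remaining <= 0:
        raise RuntimeError('Deployment timed out after %sm' % used)
    return remaining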
tb.TripleoProvide(verbosity=parsed_args.verbosity) - - if parsed_args.node_uuids: - provide.provide(nodes=parsed_args.node_uuids) - - else: - provide.provide_manageable_nodes() - - -class CleanNode(command.Command): - """Run node(s) through cleaning.""" - - log = logging.getLogger(__name__ + ".CleanNode") - - def get_parser(self, prog_name): - parser = super(CleanNode, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('node_uuids', - nargs="*", - metavar="", - default=[], - help=_('Baremetal Node UUIDs for the node(s) to be ' - 'cleaned')) - group.add_argument("--all-manageable", - action='store_true', - help=_("Clean all nodes currently in 'manageable'" - " state")) - group.add_argument("--verbosity", - type=int, - default=1, - help=_("Print debug output during execution")) - parser.add_argument('--provide', - action='store_true', - help=_('Provide (make available) the nodes once ' - 'cleaned')) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - nodes = parsed_args.node_uuids - - clean = tb.TripleoClean(verbosity=parsed_args.verbosity) - if nodes: - clean.clean( - nodes=parsed_args.node_uuids) - else: - clean.clean_manageable_nodes() - - if parsed_args.provide: - provide = tb.TripleoProvide(verbosity=parsed_args.verbosity) - if nodes: - provide.provide(nodes=nodes) - else: - provide.provide_manageable_nodes() - - -class ConfigureNode(command.Command): - """Configure Node boot options.""" - - log = logging.getLogger(__name__ + ".ConfigureNode") - - def get_parser(self, prog_name): - parser = super(ConfigureNode, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('node_uuids', - nargs="*", - metavar="", - default=[], - help=_('Baremetal Node UUIDs for the node(s) to be ' - 'configured')) - group.add_argument("--all-manageable", - action='store_true', - help=_("Configure all nodes currently in " - "'manageable' state")) - parser.add_argument( - '--deploy-kernel', - default='file://%s/agent.kernel' % - constants.IRONIC_HTTP_BOOT_BIND_MOUNT, - help=_('Image with deploy kernel.')) - parser.add_argument( - '--deploy-ramdisk', - default='file://%s/agent.ramdisk' % - constants.IRONIC_HTTP_BOOT_BIND_MOUNT, - help=_('Image with deploy ramdisk.')) - parser.add_argument('--instance-boot-option', - choices=['local', 'netboot'], - help=_('Whether to set instances for booting from ' - 'local hard drive (local) or network ' - '(netboot).')) - parser.add_argument('--boot-mode', - choices=['uefi', 'bios'], - help=_('Whether to set the boot mode to UEFI ' - '(uefi) or legacy BIOS (bios)')) - parser.add_argument('--root-device', - help=_('Define the root device for nodes. ' - 'Can be either a list of device names ' - '(without /dev) to choose from or one of ' - 'two strategies: largest or smallest. For ' - 'it to work this command should be run ' - 'after the introspection.')) - parser.add_argument('--root-device-minimum-size', - type=int, default=4, - help=_('Minimum size (in GiB) of the detected ' - 'root device. 
Used with --root-device.')) - parser.add_argument('--overwrite-root-device-hints', - action='store_true', - help=_('Whether to overwrite existing root device ' - 'hints when --root-device is used.')) - parser.add_argument("--verbosity", - type=int, - default=1, - help=_("Print debug output during execution")) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - conf = tb.TripleoConfigure( - kernel_name=parsed_args.deploy_kernel, - ramdisk_name=parsed_args.deploy_ramdisk, - instance_boot_option=parsed_args.instance_boot_option, - boot_mode=parsed_args.boot_mode, - root_device=parsed_args.root_device, - root_device_minimum_size=parsed_args.root_device_minimum_size, - overwrite_root_device_hints=( - parsed_args.overwrite_root_device_hints) - ) - - if parsed_args.node_uuids: - conf.configure( - node_uuids=parsed_args.node_uuids) - else: - conf.configure_manageable_nodes() - - -class DiscoverNode(command.Command): - """Discover overcloud nodes by polling their BMCs.""" - - log = logging.getLogger(__name__ + ".DiscoverNode") - - def get_parser(self, prog_name): - parser = super(DiscoverNode, self).get_parser(prog_name) - ip_group = parser.add_mutually_exclusive_group(required=True) - ip_group.add_argument('--ip', action='append', - dest='ip_addresses', metavar='', - help=_('IP address(es) to probe')) - ip_group.add_argument('--range', dest='ip_addresses', - metavar='', help=_('IP range to probe')) - parser.add_argument('--credentials', metavar='', - action='append', required=True, - help=_('Key/value pairs of possible credentials')) - parser.add_argument('--port', action='append', metavar='', - type=int, help=_('BMC port(s) to probe')) - parser.add_argument('--introspect', action='store_true', - help=_('Introspect the imported nodes')) - parser.add_argument('--run-validations', action='store_true', - default=False, - help=_('Run the pre-deployment validations. 
These ' - 'external validations are from the TripleO ' - 'Validations project.')) - parser.add_argument('--provide', action='store_true', - help=_('Provide (make available) the nodes')) - parser.add_argument('--no-deploy-image', action='store_true', - help=_('Skip setting the deploy kernel and ' - 'ramdisk.')) - parser.add_argument('--instance-boot-option', - choices=['local', 'netboot'], default='local', - help=_('Whether to set instances for booting from ' - 'local hard drive (local) or network ' - '(netboot).')) - parser.add_argument('--concurrency', type=int, - default=20, - help=_('Maximum number of nodes to introspect at ' - 'once.')) - parser.add_argument('--node-timeout', type=int, - default=1200, - help=_('Maximum timeout for node introspection.')) - parser.add_argument('--max-retries', type=int, - default=1, - help=_('Maximum introspection retries.')) - parser.add_argument('--retry-timeout', type=int, - default=120, - help=_('Maximum timeout between introspection' - 'retries')) - parser.add_argument("--verbosity", - type=int, - default=1, - help=_("Print debug output during execution")) - return parser - - # FIXME(tonyb): This is not multi-arch safe :( - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if parsed_args.no_deploy_image: - deploy_kernel = None - deploy_ramdisk = None - else: - deploy_kernel = 'file://{}/agent.kernel'.format( - constants.IRONIC_HTTP_BOOT_BIND_MOUNT - ) - deploy_ramdisk = 'file://{}/agent.ramdisk'.format( - constants.IRONIC_HTTP_BOOT_BIND_MOUNT - ) - - credentials = [list(x.split(':', 1)) for x in parsed_args.credentials] - kwargs = {} - # Leave it up to the workflow to figure out the defaults - if parsed_args.port: - kwargs['ports'] = parsed_args.port - - nodes = baremetal.discover_and_enroll( - self.app.client_manager, - ip_addresses=parsed_args.ip_addresses, - credentials=credentials, - kernel_name=deploy_kernel, - ramdisk_name=deploy_ramdisk, - instance_boot_option=parsed_args.instance_boot_option, - **kwargs - ) - - nodes_uuids = [node.uuid for node in nodes] - - if parsed_args.introspect: - baremetal.introspect( - self.app.client_manager, - node_uuids=nodes_uuids, - run_validations=parsed_args.run_validations, - concurrency=parsed_args.concurrency, - node_timeout=parsed_args.node_timeout, - max_retries=parsed_args.max_retries, - retry_timeout=parsed_args.retry_timeout, - ) - - if parsed_args.provide: - provide = tb.TripleoProvide(verbosity=parsed_args.verbosity) - provide.provide(nodes=nodes_uuids) - - -class ExtractProvisionedNode(command.Command): - - log = logging.getLogger(__name__ + ".ExtractProvisionedNode") - - def _setup_clients(self): - self.clients = self.app.client_manager - self.orchestration_client = self.clients.orchestration - self.baremetal_client = self.clients.baremetal - self.network_client = self.clients.network - - def get_parser(self, prog_name): - parser = super(ExtractProvisionedNode, self).get_parser(prog_name) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('-o', '--output', - metavar='', - help=_('The output file path describing the ' - 'baremetal deployment')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt for existing files ' - '(assume yes).')) - parser.add_argument('--roles-file', '-r', dest='roles_file', - required=False, - help=_('Role data definition 
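Note how the credentials are parsed in take_action above: splitting on the first ':' only, so passwords that themselves contain ':' survive intact. For instance:

creds = ['admin:s3cret', 'root:pa:ss']
print([c.split(':', 1) for c in creds])
# [['admin', 's3cret'], ['root', 'pa:ss']]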
file')) - parser.add_argument('--networks-file', '-n', dest='networks_file', - required=False, - help=_('Network data definition file')) - parser.add_argument('--working-dir', - action='store', - help=_('The working directory for the deployment ' - 'where all input, output, and generated ' - 'files will be stored.\nDefaults to ' - '"$HOME/overcloud-deploy/"')) - return parser - - def _get_subnet_from_net_name_and_ip(self, net_name, ip_addr): - try: - network = self.network_client.find_network(net_name) - except openstack_exc.DuplicateResource: - raise oscexc.CommandError( - "Unable to extract role networks. Duplicate network resources " - "with name %s detected." % net_name) - - if network is None: - raise oscexc.CommandError("Unable to extract role networks. " - "Network %s not found." % net_name) - - for subnet_id in network.subnet_ids: - subnet = self.network_client.get_subnet(subnet_id) - if (ipaddress.ip_address(ip_addr) - in ipaddress.ip_network(subnet.cidr)): - subnet_name = subnet.name - return subnet_name - - raise oscexc.CommandError("Unable to extract role networks. Could not " - "find subnet for IP address %(ip)s on " - "network %(net)s." % {'ip': ip_addr, - 'net': net_name}) - - def _convert_heat_nic_conf_to_j2(self, stack, role_name, network_data, - resource_registry, parsed_args): - heat_nic_conf = resource_registry.get( - 'OS::TripleO::{}::Net::SoftwareConfig'.format(role_name)) - if heat_nic_conf is None or heat_nic_conf == 'OS::Heat::None': - return None - - j2_nic_conf_dir = os.path.join(self.working_dir, 'nic-configs') - oooutils.makedirs(j2_nic_conf_dir) - - heat_nic_conf_basename = os.path.basename(heat_nic_conf) - tmp_heat_nic_conf_path = os.path.join(j2_nic_conf_dir, - heat_nic_conf_basename) - heat_nic_conf_content = stack.files().get(heat_nic_conf) - - j2_nic_conf_basename = (heat_nic_conf_basename.rstrip('.yaml') + '.j2') - j2_nic_conf_path = os.path.join(j2_nic_conf_dir, j2_nic_conf_basename) - - tmp_net_data_fd, tmp_net_data_path = tempfile.mkstemp(suffix=".yaml") - try: - with open(tmp_net_data_path, 'w') as tmp_net_data: - tmp_net_data.write(yaml.safe_dump(network_data)) - with open(tmp_heat_nic_conf_path, 'w') as tmp_heat_nic_conf: - tmp_heat_nic_conf.write(heat_nic_conf_content) - - cmd = ['/usr/share/openstack-tripleo-heat-templates/tools/' - 'convert_heat_nic_config_to_ansible_j2.py'] - if parsed_args.yes: - cmd.extend(['--yes']) - cmd.extend(['--stack', stack.stack_name, - '--networks_file', tmp_net_data_path, - tmp_heat_nic_conf_path]) - retcode = oooutils.run_command_and_log(self.log, cmd) - finally: - try: - os.remove(tmp_net_data_path) - except (IsADirectoryError, FileNotFoundError, PermissionError): - pass - try: - os.remove(tmp_heat_nic_conf_path) - except (IsADirectoryError, FileNotFoundError, PermissionError): - pass - - return j2_nic_conf_path if retcode == 0 else None - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if not parsed_args.working_dir: - self.working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - self.working_dir = parsed_args.working_dir - oooutils.makedirs(self.working_dir) - - self._setup_clients() - stack = oooutils.get_stack(self.orchestration_client, - parsed_args.stack) - tht_j2_sources = oooutils.get_stack_output_item( - stack, 'TripleoHeatTemplatesJinja2RenderingDataSources') or {} - - if parsed_args.roles_file: - roles_file = os.path.abspath(parsed_args.roles_file) - with open(roles_file, 'r') as fd: - role_data = yaml.safe_load(fd.read()) - else: - 
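_get_subnet_from_net_name_and_ip reduces to a containment test once the network's subnets are known; the stdlib makes that a one-liner (addresses illustrative):

import ipaddress

ip = ipaddress.ip_address('172.16.1.10')
print(ip in ipaddress.ip_network('172.16.1.0/24'))  # True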
role_data = tht_j2_sources.get('roles_data') - if role_data is None: - raise oscexc.CommandError( - "Unable to extract. Role data not available in {} stack " - "output. Please provide the roles data for the deployed " - "stack by setting the --roles-data argument.".format( - parsed_args.stack)) - - if parsed_args.networks_file: - networks_file = os.path.abspath(parsed_args.networks_file) - with open(networks_file, 'r') as fd: - network_data = yaml.safe_load(fd.read()) - else: - network_data = tht_j2_sources.get('networks_data') - if network_data is None: - raise oscexc.CommandError( - "Unable to extract. Network data not available in {} " - "stack output. Please provide the networks data for the " - "deployed stack by setting the --networks-data argument." - .format(parsed_args.stack)) - - # Convert role_data to a dict - role_data = {x['name']: x for x in role_data} - - host_vars = oooutils.get_stack_output_item( - stack, 'AnsibleHostVarsMap') or {} - role_net_ip_map = oooutils.get_stack_output_item( - stack, 'RoleNetIpMap') or {} - parameters = stack.to_dict().get('parameters', {}) - parameter_defaults = stack.environment().get('parameter_defaults', {}) - resource_registry = stack.environment().get('resource_registry', {}) - - # list all baremetal nodes and map hostname to node name - node_details = self.baremetal_client.node.list(detail=True) - hostname_node_map = {} - hostname_node_resource = {} - for node in node_details: - hostname = node.instance_info.get('display_name') - if hostname: - hostname_node_map[hostname] = node.id - if hostname and node.resource_class: - hostname_node_resource[hostname] = node.resource_class - - data = [] - warnings = [] - network_environment = {'resource_registry': {}, - 'parameter_defaults': {}} - for role_name, entries in host_vars.items(): - role_count = len(entries) - - net_parameter_defaults = {} - net_resource_registry = { - "OS::TripleO::{}::Net::SoftwareConfig".format(role_name): - "OS::Heat::None"} - network_environment['resource_registry'].update( - net_resource_registry) - # skip zero count roles - if not role_count: - continue - - if role_name not in role_data: - raise oscexc.CommandError( - "Unable to extract. Invalid role file. Role {} is not " - "defined in roles file {}".format(role_name, roles_file)) - - role = collections.OrderedDict() - role['name'] = role_name - role['count'] = role_count - - hostname_format = parameters.get('%sHostnameFormat' % role_name) - if hostname_format: - role['hostname_format'] = hostname_format - - defaults = role['defaults'] = {} - - # Add networks to the role default section - role_networks = defaults['networks'] = [] - for net_name, ips in role_net_ip_map[role_name].items(): - subnet_name = self._get_subnet_from_net_name_and_ip(net_name, - ips[0]) - if net_name == constants.CTLPLANE_NET_NAME: - role_networks.append({'network': net_name, - 'vif': True}) - else: - role_networks.append({'network': net_name, - 'subnet': subnet_name}) - - # Add network config to role defaults section - net_conf = defaults['network_config'] = {} - net_conf['template'] = parameters.get( - role_name + 'NetworkConfigTemplate') - if net_conf['template'] is None: - net_conf['template'] = self._convert_heat_nic_conf_to_j2( - stack, role_name, network_data, resource_registry, - parsed_args) - - if net_conf['template'] is None: - warnings.append( - 'WARNING: No network config found for role {}. 
' - 'Please edit the file and set the path to the correct ' - 'network config template.'.format(role_name)) - else: - net_parameter_defaults = { - "{}NetworkConfigTemplate".format(role_name): - "{}".format(net_conf['template'])} - warnings.append( - 'WARNING: Network config for role {} was ' - 'automatically converted from Heat template to ' - 'Ansible Jinja2 template. Please review the file: {}' - .format(role_name, net_conf['template'])) - - if parameters.get(role_name + 'NetworkDeploymentActions'): - network_deployment_actions = parameters.get( - role_name + 'NetworkDeploymentActions') - else: - network_deployment_actions = parameters.get( - 'NetworkDeploymentActions', ['CREATE']) - - net_conf['network_config_update'] = ( - 'UPDATE' in network_deployment_actions) - - # The NetConfigDataLookup parameter is of type: json, but when - # not set it returns as string '{}' - ncdl = parameters.get('NetConfigDataLookup') - if isinstance(ncdl, str): - ncdl = json.loads(ncdl) - if ncdl: - net_conf['net_config_data_lookup'] = ncdl - - if parameters.get('DnsSearchDomains'): - net_conf['dns_search_domains'] = parameters.get( - 'DnsSearchDomains') - - net_conf['physical_bridge_name'] = parameters.get( - 'NeutronPhysicalBridge', 'br-ex') - net_conf['public_interface_name'] = parameters.get( - 'NeutronPublicInterface', 'nic1') - - if role_data[role_name].get('default_route_networks'): - net_conf['default_route_network'] = role_data[role_name].get( - 'default_route_networks') - if role_data[role_name].get('networks_skip_config'): - net_conf['networks_skip_config'] = role_data[role_name].get( - 'networks_skip_config') - - # Add individual instances - ips_from_pool = parameter_defaults.get( - '{}IPs'.format(role_name), {}) - instances = role['instances'] = [] - for idx, entry in enumerate(sorted(entries)): - instance = {'hostname': entry} - - if entry in hostname_node_map: - instance['name'] = hostname_node_map[entry] - - if entry in hostname_node_resource: - instance['resource_class'] = hostname_node_resource[entry] - - if ips_from_pool: - instance['networks'] = copy.deepcopy(role_networks) - for net in instance['networks']: - net['fixed_ip'] = ( - role_net_ip_map[role_name][net['network']][idx]) - - instances.append(instance) - if net_parameter_defaults != {}: - network_environment['parameter_defaults'].update( - net_parameter_defaults) - data.append(role) - - # Write the file header - file_data = StringIO() - file_data.write('# Generated with the following on %s\n#\n' % - datetime.datetime.now().isoformat()) - file_data.write('# openstack %s\n#\n\n' % - ' '.join(self.app.command_options)) - # Write any warnings in the file header - for warning in warnings: - file_data.write('# {}\n'.format(warning)) - if warnings: - file_data.write(('#\n\n')) - # Write the data - if data: - yaml.dump(data, file_data, RoleDataDumper, width=120, - default_flow_style=False) - - if len(network_environment['parameter_defaults']) > 0: - net_env_file = os.path.join(self.working_dir, - "{}-network-environment.yaml".format( - parsed_args.stack)) - with open(net_env_file, 'w+') as nfp: - nfp.write(yaml.dump(network_environment, - width=120, - default_flow_style=False)) - - if parsed_args.output: - if (os.path.exists(parsed_args.output) - and not parsed_args.yes and sys.stdin.isatty()): - prompt_response = input( - ('Overwrite existing file %s [y/N]?' 
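The NetConfigDataLookup quirk handled above, a json-typed Heat parameter that arrives as the string '{}' when unset, is a recurring pattern; normalized in isolation:

import json

def normalize_json_param(value):
    # Heat may hand back json-typed parameters as strings.
    if isinstance(value, str):
        value = json.loads(value)
    return value

print(normalize_json_param('{}'))               # {}
print(normalize_json_param({'nic1': 'em1'}))    # {'nic1': 'em1'}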
% parsed_args.output) - ).lower() - if not prompt_response.startswith('y'): - raise oscexc.CommandError( - "Will not overwrite existing file:" - " %s" % parsed_args.output) - with open(parsed_args.output, 'w+') as fp: - fp.write(file_data.getvalue()) - self.app.stdout.write(file_data.getvalue()) - - -class RoleDataDumper(yaml.SafeDumper): - def represent_ordered_dict(self, data): - return self.represent_dict(data.items()) - - -RoleDataDumper.add_representer(collections.OrderedDict, - RoleDataDumper.represent_ordered_dict) diff --git a/tripleoclient/v1/overcloud_parameters.py b/tripleoclient/v1/overcloud_parameters.py deleted file mode 100644 index 8418cd99b..000000000 --- a/tripleoclient/v1/overcloud_parameters.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse -import logging -import yaml - -from osc_lib.i18n import _ - -from tripleoclient import command -from tripleoclient import utils -from tripleoclient.workflows import parameters - - -class GenerateFencingParameters(command.Command): - """Generate fencing parameters""" - - log = logging.getLogger(__name__ + ".GenerateFencing") - - def get_parser(self, prog_name): - parser = super(GenerateFencingParameters, self).get_parser(prog_name) - parser.add_argument('-a', '--action', dest='fence_action', - help=_('DEPRECATED: This option is ignored.')) - parser.add_argument('--delay', type=int, - help=_('Wait DELAY seconds before fencing is ' - 'started')) - parser.add_argument('--ipmi-lanplus', - dest='ipmi_lanplus', - default=True, - action='store_true', - help=_('DEPRECATED: This is the default.')) - parser.add_argument('--ipmi-no-lanplus', - dest='ipmi_lanplus', - action='store_false', - help=_('Do not use Lanplus. Defaults to: false')) - parser.add_argument('--ipmi-cipher', type=int, - help=_('Ciphersuite to use (same as the ipmitool -C ' - 'parameter).')) - parser.add_argument('--ipmi-level', - help=_('Privilege level on IPMI device. Valid ' - 'levels: callback, user, operator, ' - 'administrator.')) - parser.add_argument('--output', type=argparse.FileType('w'), - help=_('Write parameters to a file')) - parser.add_argument('instackenv', type=argparse.FileType('r')) - return parser - - def take_action(self, parsed_args): - nodes_config = utils.parse_env_file(parsed_args.instackenv) - parsed_args.instackenv.close() - result = parameters.generate_fencing_parameters( - nodes_json=nodes_config, - delay=parsed_args.delay, - ipmi_level=parsed_args.ipmi_level, - ipmi_cipher=parsed_args.ipmi_cipher, - ipmi_lanplus=parsed_args.ipmi_lanplus, - ) - - fencing_parameters = yaml.safe_dump(result, default_flow_style=False) - if parsed_args.output: - parsed_args.output.write(fencing_parameters) - parsed_args.output.close() - else: - print(fencing_parameters) diff --git a/tripleoclient/v1/overcloud_profiles.py b/tripleoclient/v1/overcloud_profiles.py deleted file mode 100644 index 77dc94a77..000000000 --- a/tripleoclient/v1/overcloud_profiles.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015 Red Hat, Inc.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from osc_lib.i18n import _ - -from tripleoclient import command -from tripleoclient import exceptions -from tripleoclient import utils - -DEPRECATION_MSG = ''' -This command has been DEPRECATED and will be removed. The compute service is no -longer used on the undercloud by default, hence profile matching with compute -flavors is no longer used. -''' - - -class MatchProfiles(command.Command): - """Assign and validate profiles on nodes""" - - log = logging.getLogger(__name__ + ".MatchProfiles") - - def get_parser(self, prog_name): - parser = super(MatchProfiles, self).get_parser(prog_name) - parser.epilog = DEPRECATION_MSG - parser.add_argument( - '--dry-run', - action='store_true', - default=False, - help=_('Only run validations, but do not apply any changes.') - ) - utils.add_deployment_plan_arguments(parser) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - self.log.warning(DEPRECATION_MSG) - bm_client = self.app.client_manager.baremetal - - flavors = self._collect_flavors(parsed_args) - - errors, warnings = utils.assign_and_verify_profiles( - bm_client, flavors, - assign_profiles=True, - dry_run=parsed_args.dry_run - ) - if errors: - raise exceptions.ProfileMatchingError( - _('Failed to validate and assign profiles.')) - - def _collect_flavors(self, parsed_args): - """Collect nova flavors in use. 
- - :returns: dictionary flavor name -> (flavor object, scale) - """ - compute_client = self.app.client_manager.compute - - flavors = {f.name: f for f in compute_client.flavors.list()} - result = {} - - message = "Provided --{}-flavor, '{}', does not exist" - - for target, (flavor_name, scale) in ( - utils.get_roles_info(parsed_args).items() - ): - if flavor_name is None or not scale: - self.log.debug("--{}-flavor not used".format(target)) - continue - - try: - flavor = flavors[flavor_name] - except KeyError: - raise exceptions.ProfileMatchingError( - message.format(target, flavor_name)) - - result[flavor_name] = (flavor, scale) - - return result - - -POSTFIX = '_profile' - - -class ListProfiles(command.Lister): - """List overcloud node profiles""" - - log = logging.getLogger(__name__ + ".ListProfiles") - - def get_parser(self, prog_name): - parser = super(ListProfiles, self).get_parser(prog_name) - parser.epilog = DEPRECATION_MSG - parser.add_argument( - '--all', - action='store_true', - default=False, - help=_('List all nodes, even those not available to Nova.') - ) - utils.add_deployment_plan_arguments(parser) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - self.log.warning(DEPRECATION_MSG) - bm_client = self.app.client_manager.baremetal - compute_client = self.app.client_manager.compute - - hypervisors = {h.hypervisor_hostname: h - for h in compute_client.hypervisors.list() - if h.hypervisor_type == 'ironic'} - result = [] - - maintenance = None if parsed_args.all else False - for node in bm_client.node.list(detail=True, maintenance=maintenance): - error = '' - - if node.provision_state not in ('active', 'available'): - error = "Provision state %s" % node.provision_state - elif node.power_state in (None, 'error'): - error = "Power state %s" % node.power_state - elif node.maintenance: - error = "Maintenance" - else: - try: - hypervisor = hypervisors[node.uuid] - except KeyError: - error = 'No hypervisor record' - else: - if hypervisor.status != 'enabled': - error = 'Compute service disabled' - elif hypervisor.state != 'up': - error = 'Compute service down' - - if error and not parsed_args.all: - continue - - caps = utils.node_get_capabilities(node) - profile = caps.get('profile') - possible_profiles = [k[:-len(POSTFIX)] - for k, v in caps.items() - if k.endswith(POSTFIX) and - v.lower() in ('1', 'true')] - # sorting for convenient display and testing - possible_profiles.sort() - - record = (node.uuid, node.name or '', node.provision_state, - profile, ', '.join(possible_profiles)) - if parsed_args.all: - record += (error,) - result.append(record) - - cols = ("Node UUID", "Node Name", "Provision State", "Current Profile", - "Possible Profiles") - if parsed_args.all: - cols += ('Error',) - return (cols, result) diff --git a/tripleoclient/v1/overcloud_raid.py b/tripleoclient/v1/overcloud_raid.py deleted file mode 100644 index 2fee463c2..000000000 --- a/tripleoclient/v1/overcloud_raid.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
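Profile capabilities are encoded as '<name>_profile' flags in the node capabilities dict; the list comprehension in ListProfiles.take_action above unpacks them. The same logic in isolation (sample capabilities are illustrative):

POSTFIX = '_profile'
caps = {'compute_profile': '1',
        'control_profile': 'true',
        'boot_option': 'local'}
possible = sorted(k[:-len(POSTFIX)] for k, v in caps.items()
                  if k.endswith(POSTFIX) and v.lower() in ('1', 'true'))
print(possible)  # ['compute', 'control']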
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import logging -import os - -from osc_lib.i18n import _ -import yaml - -from tripleoclient import command -from tripleoclient import utils -from tripleoclient.workflows import baremetal - - -class CreateRAID(command.Command): - """Create RAID on given nodes""" - - log = logging.getLogger(__name__ + ".CreateRAID") - - def get_parser(self, prog_name): - parser = super(CreateRAID, self).get_parser(prog_name) - parser.add_argument('--node', action='append', required=True, - help=_('Nodes to create RAID on (expected to be ' - 'in manageable state). Can be specified ' - 'multiple times.')) - parser.add_argument('configuration', - help=_('RAID configuration (YAML/JSON string or ' - 'file name).')) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action({args})".format(args=parsed_args)) - - if os.path.exists(parsed_args.configuration): - with open(parsed_args.configuration, 'r') as fp: - configuration = yaml.safe_load(fp.read()) - else: - try: - configuration = yaml.safe_load(parsed_args.configuration) - except yaml.YAMLError as exc: - raise RuntimeError( - _('Configuration is not an existing file and cannot be ' - 'parsed as YAML: %s') % exc) - - # Basic sanity check, we defer the full check to Ironic - try: - disks = configuration['logical_disks'] - except KeyError: - raise ValueError( - _('Configuration must contain key "logical_disks"')) - except TypeError: - raise TypeError( - _('Configuration must be an object, got %r instead') - % configuration) - - if (not isinstance(disks, list) or - not all(isinstance(item, dict) for item in disks)): - raise TypeError( - _('Logical disks list is expected to be a list of objects, ' - 'got %r instead') % disks) - - baremetal.create_raid_configuration( - clients=self.app.client_manager, - node_uuids=parsed_args.node, - configuration=configuration, - verbosity=utils.playbook_verbosity(self=self) - ) diff --git a/tripleoclient/v1/overcloud_restore.py b/tripleoclient/v1/overcloud_restore.py deleted file mode 100644 index f055d9a4d..000000000 --- a/tripleoclient/v1/overcloud_restore.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
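CreateRAID above only sanity-checks the shape of the configuration (a 'logical_disks' key holding a list of objects) and defers full validation to Ironic. A minimal document that passes that check; the field names follow Ironic's target RAID configuration schema, with illustrative values:

import yaml

config = yaml.safe_load("""
logical_disks:
  - size_gb: 100
    raid_level: "1"
    is_root_volume: true
""")
assert isinstance(config['logical_disks'], list)
assert all(isinstance(d, dict) for d in config['logical_disks'])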
-# - -import argparse -import logging -import os -import yaml - -from osc_lib import exceptions as oscexc -from osc_lib.command import command -from osc_lib.i18n import _ -from osc_lib import utils as osc_utils - -from tripleoclient import constants -from tripleoclient import utils - -LOG = logging.getLogger(__name__ + ".RestoreOvercloud") - -INVENTORY = constants.ANSIBLE_INVENTORY.format('overcloud') - - -class RestoreOvercloud(command.Command): - """Restore the Overcloud""" - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - - parser.add_argument( - '--inventory', - default=INVENTORY, - help=_("Tripleo inventory file generated with " - "tripleo-ansible-inventory command. " - "Defaults to: " + INVENTORY) - ) - - parser.add_argument( - '--stack', - nargs='?', - help=_('Name or ID of the stack to be used' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=osc_utils.env('OVERCLOUD_STACK_NAME')) - - parser.add_argument( - '--node-name', - required=True, - help=_("Controller name is a required parameter " - "which defines the controller node to be " - "restored.") - ) - - parser.add_argument( - '--extra-vars', - default=None, - action='store', - help=_("Set additional variables as Dict or as " - "an absolute path of a JSON or YAML file type. " - "i.e. --extra-vars '{\"key\": \"val\", " - " \"key2\": \"val2\"}' " - "i.e. --extra-vars /path/to/my_vars.yaml " - "i.e. --extra-vars /path/to/my_vars.json. " - "For more information about the variables that " - "can be passed, visit: https://opendev.org/openstack/" - "tripleo-ansible/src/branch/master/tripleo_ansible/" - "roles/backup_and_restore/defaults/main.yml.") - ) - - return parser - - def _parse_extra_vars(self, raw_extra_vars): - - if raw_extra_vars is None: - return {} - if os.path.exists(raw_extra_vars): - with open(raw_extra_vars, 'r') as fp: - extra_vars = yaml.safe_load(fp.read()) - else: - try: - extra_vars = yaml.safe_load(raw_extra_vars) - except yaml.YAMLError as exc: - raise RuntimeError( - _('--extra-vars is not an existing file and cannot be ' - 'parsed as YAML / JSON: %s') % exc) - - return extra_vars - - def _run_restore_overcloud(self, parsed_args): - """Backup defined overcloud nodes.""" - - if parsed_args.stack in (None, ''): - raise oscexc.CommandError("You must specify a stack name") - - extra_vars = self._parse_extra_vars(parsed_args.extra_vars) - node = parsed_args.node_name - parameter = 'tripleo_backup_and_restore_overcloud_restore_name' - extra_vars[parameter] = node - - self._run_ansible_playbook( - playbook='cli-overcloud-restore-node.yaml', - inventory=parsed_args.inventory, - tags=None, - skip_tags=None, - extra_vars=extra_vars, - ssh_user='stack' - ) - - def _run_ansible_playbook(self, - playbook, - inventory, - tags, - skip_tags, - extra_vars, - ssh_user): - """Run ansible playbook""" - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook=playbook, - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - tags=tags, - skip_tags=skip_tags, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ssh_user=ssh_user - ) - - def take_action(self, parsed_args): - - self._run_restore_overcloud(parsed_args) - - print( - '\n' - ' #############################################################\n' - ' # Disclaimer #\n' - ' # Backup verification is the End Users responsibility #\n' - ' # Please verify backup integrity before any possible #\n' - ' # 
disruptive actions against the Overcloud. The resulting #\n' - ' # backup file path will be shown on a successful execution. #\n' - ' # #\n' - ' # .-Stay safe and avoid future issues-. #\n' - ' #############################################################\n' - ) diff --git a/tripleoclient/v1/overcloud_roles.py b/tripleoclient/v1/overcloud_roles.py deleted file mode 100644 index ed27e4e60..000000000 --- a/tripleoclient/v1/overcloud_roles.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import collections -import os -import sys - -from tripleo_common.exception import NotFound -from tripleo_common.utils import roles as rolesutils - -from tripleoclient import command -from tripleoclient.constants import TRIPLEO_HEAT_TEMPLATES - - -class RolesBaseCommand(command.Command): - auth_required = False - - def get_parser(self, prog_name): - parser = super(RolesBaseCommand, self).get_parser(prog_name) - path = os.path.join(TRIPLEO_HEAT_TEMPLATES, 'roles') - parser.add_argument('--roles-path', metavar='', - default=path, - help='Filesystem path containing the role yaml ' - 'files. By default this is {}'.format(path)) - return parser - - -class RolesGenerate(RolesBaseCommand): - """Generate roles_data.yaml file""" - def get_parser(self, prog_name): - parser = super(RolesGenerate, self).get_parser(prog_name) - parser.add_argument('-o', '--output-file', metavar='', - help='File to capture all output to. For example, ' - 'roles_data.yaml') - parser.add_argument('--skip-validate', action='store_false', - help='Skip role metadata type validation when ' - 'generating the roles_data.yaml') - parser.add_argument('roles', nargs="+", metavar='', - help='List of roles to use to generate the ' - 'roles_data.yaml file for the deployment. ' - 'NOTE: Ordering is important if no role has ' - 'the "primary" and "controller" tags. If no ' - 'role is tagged then the first role listed ' - 'will be considered the primary role. This ' - 'usually is the controller role.') - return parser - - def _capture_output(self, filename=None): - """Capture stdout to a file if provided""" - if filename is not None: - sys.stdout = open(filename, 'w') - - def _stop_capture_output(self, filename=None): - """Stop capturing stdout to a file if provided""" - if filename is not None: - sys.stdout.close() - - def take_action(self, parsed_args): - """Generate roles_data.yaml from the given roles - - From the provided roles, validate that we have yaml files for each - role in our roles path and print them out concatenated together in the - order they were provided.
- """ - self.log.debug('take_action({})'.format(parsed_args)) - roles_path = os.path.realpath(parsed_args.roles_path) - # eliminate any dupes from the command line with an OrderedDict - requested_roles = collections.OrderedDict.fromkeys(parsed_args.roles) - available_roles = rolesutils.get_roles_list_from_directory(roles_path) - rolesutils.check_role_exists(available_roles, - list(requested_roles.keys())) - self._capture_output(parsed_args.output_file) - roles_data = rolesutils.generate_roles_data_from_directory( - roles_path, list(requested_roles.keys()), - parsed_args.skip_validate) - sys.stdout.write(roles_data) - self._stop_capture_output(parsed_args.output_file) - - -class RoleList(RolesBaseCommand): - """List availables roles.""" - - def get_parser(self, prog_name): - parser = super(RoleList, self).get_parser(prog_name) - return parser - - def take_action(self, parsed_args): - self.log.debug('take_action({})'.format(parsed_args)) - roles_path = os.path.realpath(parsed_args.roles_path) - roles = rolesutils.get_roles_list_from_directory(roles_path) - print('\n'.join(roles)) - - -class RoleShow(RolesBaseCommand): - """Show information about a given role.""" - - def get_parser(self, prog_name): - parser = super(RoleShow, self).get_parser(prog_name) - parser.add_argument('role', metavar='', - help='Role to display more information about.') - return parser - - def take_action(self, parsed_args): - self.log.debug('take_action({})'.format(parsed_args)) - roles_path = os.path.realpath(parsed_args.roles_path) - role_name = parsed_args.role - file_path = os.path.join(roles_path, '{}.yaml'.format(role_name)) - try: - with open(file_path, 'r') as f: - role = rolesutils.validate_role_yaml(f) - except IOError: - raise NotFound("Role '{}' not found. Use 'openstack overcloud " - "roles list' to see the available roles.". - format(parsed_args.role)) - - if 'name' in role: - print('#' * 79) - print("# Role Data for '{}'".format(role['name'])) - print('#' * 79) - - for key in sorted(role.keys()): - print("{}:".format(key), end='') - value = role[key] - - if isinstance(value, (list, tuple)): - print('') - print('\n'.join([' * {0}'.format(v) for v in value])) - else: - print(" '{}'".format(value)) diff --git a/tripleoclient/v1/overcloud_update.py b/tripleoclient/v1/overcloud_update.py deleted file mode 100644 index 12745c53b..000000000 --- a/tripleoclient/v1/overcloud_update.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import os - -from oslo_config import cfg -from oslo_log import log as logging - -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient.exceptions import OvercloudUpdateNotConfirmed - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils -from tripleoclient.v1.overcloud_deploy import DeployOvercloud - - -CONF = cfg.CONF - - -class UpdatePrepare(DeployOvercloud): - """Use Heat to update and render the new Ansible playbooks based - on the updated templates. - - These playbooks will be rendered and used during the update run step - to perform the minor update of the overcloud nodes. - """ - - log = logging.getLogger(__name__ + ".MinorUpdatePrepare") - - def get_parser(self, prog_name): - parser = super(UpdatePrepare, self).get_parser(prog_name) - - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - oooutils.ensure_run_as_normal_user() - - if (not parsed_args.yes - and not oooutils.prompt_user_for_confirmation( - constants.UPDATE_PROMPT, self.log)): - raise OvercloudUpdateNotConfirmed(constants.UPDATE_NO) - - # In case of update and upgrade we need to force the - # config_download to false. The heat stack update will be performed - # by DeployOvercloud class but skipping the config download part. - parsed_args.stack_only = True - - # Add the update-prepare.yaml environment to set noops etc - templates_dir = (parsed_args.templates or - constants.TRIPLEO_HEAT_TEMPLATES) - parsed_args.environment_files = oooutils.prepend_environment( - parsed_args.environment_files, templates_dir, - constants.UPDATE_PREPARE_ENV) - - # Throw deprecation warning if service is enabled and - # ask user if update should still be continued. - if parsed_args.environment_files: - oooutils.duplicate_param_check( - user_environments=parsed_args.environment_files - ) - oooutils.check_deprecated_service_is_enabled( - parsed_args.environment_files) - - super(UpdatePrepare, self).take_action(parsed_args) - self.log.info("Update init on stack {0} complete.".format( - parsed_args.stack)) - - -class UpdateRun(command.Command): - """Run minor update ansible playbooks on Overcloud nodes""" - - log = logging.getLogger(__name__ + ".MinorUpdateRun") - - def get_parser(self, prog_name): - parser = super(UpdateRun, self).get_parser(prog_name) - parser.add_argument( - '--limit', - action='store', - required=True, - help=_("A string that identifies a single node or comma-separated" - "list of nodes the config-download Ansible playbook " - "execution will be limited to. For example: --limit" - " \"compute-0,compute-1,compute-5\".") - ) - parser.add_argument('--playbook', - nargs="*", - default=None, - help=_("Ansible playbook to use for the minor" - " update. Can be used multiple times." - " Set this to each of those playbooks in" - " consecutive invocations of this command" - " if you prefer to run them manually." 
- " Note: make sure to run all playbooks so" - " that all services are updated and running" - " with the target version configuration.") - ) - parser.add_argument("--ssh-user", - dest="ssh_user", - action="store", - default="tripleo-admin", - help=_("DEPRECATED: Only tripleo-admin should be " - "used as ssh user.") - ) - parser.add_argument('--static-inventory', - dest='static_inventory', - action="store", - default=None, - help=_('DEPRECATED: tripleo-ansible-inventory.yaml' - ' in working dir will be used.') - ) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud') - ) - parser.add_argument( - '--tags', - action='store', - default=None, - help=_('A list of tags to use when running the the config-download' - ' ansible-playbook command.') - ) - parser.add_argument( - '--skip-tags', - action='store', - default=None, - help=_('A list of tags to skip when running the the' - ' config-download ansible-playbook command.') - ) - parser.add_argument( - '-y', '--yes', - default=False, - action='store_true', - help=_("Use -y or --yes to skip the confirmation required before " - "any update operation. Use this with caution! "), - ) - parser.add_argument( - '--ansible-forks', - action='store', - default=None, - type=int, - help=_('The number of Ansible forks to use for the' - ' config-download ansible-playbook command.') - ) - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - oooutils.ensure_run_as_normal_user() - - if (not parsed_args.yes - and not oooutils.prompt_user_for_confirmation( - constants.UPDATE_PROMPT, self.log)): - raise OvercloudUpdateNotConfirmed(constants.UPDATE_NO) - # NOTE(cloudnull): The string option "all" was a special default - # that is no longer relevant. To retain compatibility - # this condition has been put in place. - if not parsed_args.playbook or parsed_args.playbook == ['all']: - playbook = constants.MINOR_UPDATE_PLAYBOOKS - else: - playbook = parsed_args.playbook - - ansible_dir = os.path.join(oooutils.get_default_working_dir( - parsed_args.stack - ), - 'config-download', - parsed_args.stack) - - inventory = os.path.join(ansible_dir, 'tripleo-ansible-inventory.yaml') - ansible_cfg = os.path.join(ansible_dir, 'ansible.cfg') - key_file = oooutils.get_key(parsed_args.stack) - - oooutils.run_ansible_playbook( - playbook=playbook, - inventory=inventory, - workdir=ansible_dir, - playbook_dir=ansible_dir, - skip_tags=parsed_args.skip_tags, - tags=parsed_args.tags, - ansible_cfg=ansible_cfg, - ssh_user='tripleo-admin', - limit_hosts=parsed_args.limit, - reproduce_command=True, - forks=parsed_args.ansible_forks, - extra_env_variables={ - "ANSIBLE_BECOME": True, - "ANSIBLE_PRIVATE_KEY_FILE": key_file - } - ) - self.log.info("Completed Minor Update Run.") diff --git a/tripleoclient/v1/overcloud_upgrade.py b/tripleoclient/v1/overcloud_upgrade.py deleted file mode 100644 index dfc2995da..000000000 --- a/tripleoclient/v1/overcloud_upgrade.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import os - -from oslo_config import cfg -from oslo_log import log as logging - -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient.exceptions import OvercloudUpgradeNotConfirmed - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils -from tripleoclient.v1.overcloud_deploy import DeployOvercloud -from tripleoclient.workflows import deployment -from tripleoclient.workflows import parameters - -CONF = cfg.CONF - - -class UpgradePrepare(DeployOvercloud): - """Run heat stack update for overcloud nodes to refresh heat stack outputs. - - The heat stack outputs are what we use later on to generate ansible - playbooks which deliver the major upgrade workflow. This is used as the - first step for a major upgrade of your overcloud. - """ - - operation = "Prepare" - - template = constants.UPGRADE_PREPARE_ENV - - forbidden_params = [] - - log = logging.getLogger(__name__ + ".UpgradePrepare") - - def get_parser(self, prog_name): - parser = super(UpgradePrepare, self).get_parser(prog_name) - - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - oooutils.ensure_run_as_normal_user() - - if (not parsed_args.yes - and not oooutils.prompt_user_for_confirmation( - constants.UPGRADE_PROMPT, self.log)): - raise OvercloudUpgradeNotConfirmed(constants.UPGRADE_NO) - - # Throw deprecation warning if service is enabled and - # ask user if upgrade should still be continued. - if parsed_args.environment_files: - oooutils.duplicate_param_check( - user_environments=parsed_args.environment_files - ) - oooutils.check_deprecated_service_is_enabled( - parsed_args.environment_files) - - # In case of update and upgrade we need to force the - # stack_only to true. The heat stack update will be performed - # by DeployOvercloud class but skipping the config download part. - parsed_args.stack_only = True - # Add the template attribute environment to set noops etc - templates_dir = (parsed_args.templates or - constants.TRIPLEO_HEAT_TEMPLATES) - parsed_args.environment_files = oooutils.prepend_environment( - parsed_args.environment_files, templates_dir, - self.template) - # Parse all environment files looking for undesired - # parameters - parameters.check_forbidden_params(self.log, - parsed_args.environment_files, - self.forbidden_params) - super(UpgradePrepare, self).take_action(parsed_args) - - deployment.get_hosts_and_enable_ssh_admin( - parsed_args.stack, - parsed_args.overcloud_ssh_network, - parsed_args.overcloud_ssh_user, - self.get_key_pair(parsed_args), - parsed_args.overcloud_ssh_port_timeout, - working_dir=self.working_dir, - verbosity=oooutils.playbook_verbosity(self=self), - heat_type=parsed_args.heat_type - ) - - self.log.info("Completed Overcloud Upgrade {} for stack " - "{}".format(self.operation, parsed_args.stack)) - - -class UpgradeRun(command.Command): - """Run major upgrade ansible playbooks on Overcloud nodes - - This will run the major upgrade ansible playbooks on the overcloud. 
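# NOTE: UpgradePrepare above calls oooutils.prepend_environment so that the
# upgrade-prepare environment is evaluated before any user-supplied files;
# later environment files override earlier ones, so user settings still win.
# A hedged approximation of that helper's effect (not the real implementation):

import os

def prepend_environment(environment_files, templates_dir, env_name):
    env_path = os.path.join(templates_dir, env_name)
    # The system environment goes first so every user file can override it.
    return [env_path] + list(environment_files or [])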
- By default all playbooks are executed, that is the - upgrade_steps_playbook.yaml then the deploy_steps_playbook.yaml and - then the post_upgrade_steps_playbook.yaml. - The upgrade playbooks are made available after completion of the - 'overcloud upgrade prepare' command. This 'overcloud upgrade run' - command is the second step in the major upgrade workflow. - """ - - log = logging.getLogger(__name__ + ".UpgradeRun") - - def get_parser(self, prog_name): - parser = super(UpgradeRun, self).get_parser(prog_name) - parser.add_argument( - '--limit', - action='store', - required=True, - help=_("A string that identifies a single node or comma-separated " - "list of nodes the config-download Ansible playbook " - "execution will be limited to. For example: --limit" - " \"compute-0,compute-1,compute-5\".") - ) - parser.add_argument('--playbook', - nargs="*", - default=None, - help=_("Ansible playbook to use for the major" - " upgrade. Can be used multiple times." - " Set this to each of those playbooks in" - " consecutive invocations of this command" - " if you prefer to run them manually." - " Note: make sure to run all playbooks so" - " that all services are updated and running" - " with the target version configuration.") - ) - parser.add_argument('--static-inventory', - dest='static_inventory', - action="store", - default=None, - help=_('DEPRECATED: tripleo-ansible-inventory.yaml' - ' in working dir will be used.') - ) - parser.add_argument("--ssh-user", - dest="ssh_user", - action="store", - default="tripleo-admin", - help=_("DEPRECATED: Only tripleo-admin should be " - "used as ssh user.") - ) - parser.add_argument('--tags', - dest='tags', - action="store", - default="", - help=_('A string specifying the tag or comma ' - 'separated list of tags to be passed ' - 'as --tags to ansible-playbook.') - ) - parser.add_argument('--skip-tags', - dest='skip_tags', - action="store", - default="", - help=_('A string specifying the tag or comma ' - 'separated list of tags to be passed ' - 'as --skip-tags to ansible-playbook. ' - 'The currently supported values are ' - '\'validation\' and \'pre-upgrade\'. ' - 'In particular \'validation\' is useful ' - 'if you must re-run following a failed ' - 'upgrade and some services cannot be ' - 'started. ') - ) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud') - ) - parser.add_argument('-y', '--yes', default=False, - action='store_true', - help=_("Use -y or --yes to skip the confirmation " - "required before any upgrade " - "operation. Use this with caution! 
") - ) - parser.add_argument( - '--ansible-forks', - action='store', - default=None, - type=int, - help=_('The number of Ansible forks to use for the' - ' config-download ansible-playbook command.') - ) - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - oooutils.ensure_run_as_normal_user() - - if (not parsed_args.yes - and not oooutils.prompt_user_for_confirmation( - constants.UPGRADE_PROMPT, self.log)): - raise OvercloudUpgradeNotConfirmed(constants.UPGRADE_NO) - - working_dir = oooutils.get_default_working_dir(parsed_args.stack) - config_download_dir = os.path.join(working_dir, 'config-download') - ansible_dir = os.path.join(config_download_dir, parsed_args.stack) - inventory_path = os.path.join(ansible_dir, - 'tripleo-ansible-inventory.yaml') - # NOTE(cloudnull): The string option "all" was a special default - # that is no longer relevant. To retain compatibility - # this condition has been put in place. - if not parsed_args.playbook or parsed_args.playbook == ['all']: - playbook = [os.path.join(ansible_dir, p) - for p in constants.MAJOR_UPGRADE_PLAYBOOKS] - else: - playbook = parsed_args.playbook - - key = oooutils.get_key(parsed_args.stack) - oooutils.run_ansible_playbook( - playbook=playbook, - inventory=inventory_path, - workdir=config_download_dir, - tags=parsed_args.tags, - skip_tags=parsed_args.skip_tags, - limit_hosts=oooutils.playbook_limit_parse( - limit_nodes=parsed_args.limit - ), - forks=parsed_args.ansible_forks, - key=key, - reproduce_command=True - ) - deployment.snapshot_dir(ansible_dir) - self.log.info("Completed Overcloud Major Upgrade Run.") diff --git a/tripleoclient/v1/tripleo_config.py b/tripleoclient/v1/tripleo_config.py deleted file mode 100644 index 0a716f8d4..000000000 --- a/tripleoclient/v1/tripleo_config.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import logging - -from cliff import command -from osc_lib.i18n import _ - -from tripleoclient import constants -from tripleoclient import utils - -# For ansible.cfg generation -from tripleo_common.utils import ansible - - -class GenerateAnsibleConfig(command.Command): - """Generate the default ansible.cfg for deployments.""" - - log = logging.getLogger(__name__ + ".GenerateAnsibleConfig") - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - # TODO(bogdando): drop that once using oslo.privsep - parser.add_argument( - '--deployment-user', - dest='deployment_user', - default='stack', - help=_('User who executes the tripleo config generate command. 
' - 'Defaults to stack.') - ) - parser.add_argument('--output-dir', - dest='output_dir', - help=_("Directory to output ansible.cfg and " - "ansible.log files."), - default=constants.CLOUD_HOME_DIR) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - if utils.get_deployment_user() != parsed_args.deployment_user: - self.log.warning( - _('The --deployment-user value %s does not ' - 'match the user name executing this command!') % - parsed_args.deployment_user) - - ansible.write_default_ansible_cfg(parsed_args.output_dir, - parsed_args.deployment_user, - ssh_private_key=None) diff --git a/tripleoclient/v1/tripleo_deploy.py b/tripleoclient/v1/tripleo_deploy.py deleted file mode 100644 index 330969b76..000000000 --- a/tripleoclient/v1/tripleo_deploy.py +++ /dev/null @@ -1,1347 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import json -import logging -import netaddr -import os -import pwd -import shutil -import subprocess -import sys -import tempfile -import time -import traceback -import yaml - -from cliff import command -from heatclient.common import template_utils -from osc_lib.i18n import _ - -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import heat_launcher -from tripleoclient import utils - -from tripleo_common import constants as tc_constants -from tripleo_common.image import kolla_builder -from tripleo_common.utils import parameters -from tripleo_common.utils import passwords as password_utils - -# For ansible download and config generation -from tripleo_common.utils import ansible -from tripleo_common.inventory import TripleoInventory -from tripleo_common.utils import config - -DEPLOY_FAILURE_MESSAGE = """ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -Deployment Failed! - -ERROR: Heat log files: {0} - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -""" -DEPLOY_COMPLETION_MESSAGE = """ -######################################################## - -Deployment successful! - -######################################################## -""" -OUTPUT_ONLY_COMPLETION_MESSAGE = """ -######################################################## - -Deployment information successfully generated! - -######################################################## -""" -STANDALONE_COMPLETION_MESSAGE = """ -########################################################## - -Useful files: - -The clouds.yaml file is at {0} - -Use "export OS_CLOUD=standalone" before running the -openstack command. 
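# NOTE: GenerateAnsibleConfig.take_action above merely warns when the detected
# deployment user differs from --deployment-user. A minimal sketch of that
# check, assuming getpass for user detection (the real utils.get_deployment_user
# may resolve the user differently):

import getpass
import logging

def warn_on_user_mismatch(log, deployment_user):
    if getpass.getuser() != deployment_user:
        log.warning('The --deployment-user value %s does not match the user '
                    'name executing this command!', deployment_user)

# warn_on_user_mismatch(logging.getLogger(__name__), 'stack')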
- -########################################################## -""" - - -class Deploy(command.Command): - """Deploy containerized Undercloud""" - - log = logging.getLogger(__name__ + ".Deploy") - auth_required = False - heat_pid = None - tht_render = None - output_dir = None - tmp_ansible_dir = None - deployment_user = None - ansible_dir = None - python_version = sys.version_info[0] - ansible_playbook_cmd = "ansible-playbook" - python_cmd = "python{}".format(python_version) - - def _is_undercloud_deploy(self, parsed_args): - role = parsed_args.standalone_role - stack = parsed_args.stack - return (role in ['Undercloud'] and stack in ['undercloud']) - - def _run_preflight_checks(self, parsed_args): - """Run preflight deployment checks - - Perform any pre-deployment checks that we want to run when deploying - standalone deployments. This is skipped when in output only mode or - when used with an undercloud. The undercloud has it's own set of - deployment preflight requirements. - - :param parsed_args: parsed arguments from the cli - """ - # we skip preflight checks for output only - if parsed_args.output_only or not parsed_args.preflight: - return - - # in standalone we don't want to fixup the /etc/hosts as we'll be - # managing that elsewhere during the deployment - utils.check_hostname(fix_etc_hosts=False, logger=self.log) - - # Users can use http_proxy and https_proxy as part of the deployment, - # however we need localhost to not be proxied because we use it to talk - # to our heat api. - utils.check_env_for_proxy(no_proxy_hosts=['127.0.0.1']) - - # NOTE(cjeanner) Quick'n'dirty way before we have proper - # escalation support through oslo.privsep - def _set_data_rights(self, file_name, user=None, - mode=0o600): - - u = user or self.deployment_user - u_flag = None - f_flag = None - if u: - if os.path.exists(file_name): - try: - pwd.getpwnam(u) - cmd = 'sudo chown -R %s %s' % (u, file_name) - subprocess.check_call(cmd.split()) - except KeyError: - u_flag = 'Unknown' - else: - f_flag = "Absent" - else: - u_flag = 'Undefined' - - if u_flag: - self.log.warning(_('%(u_f)s user "%(u)s". 
You might need to ' - 'manually set ownership after the deploy') - % {'u_f': u_flag, 'u': user}) - if f_flag: - self.log.warning(_('%(f)s file is %(f_f)s.') - % {'f': file_name, 'f_f': f_flag}) - else: - os.chmod(file_name, mode) - - def _get_roles_file_path(self, parsed_args): - """Return roles_file for the deployment""" - if not parsed_args.roles_file: - roles_file = os.path.join(parsed_args.templates, - constants.STANDALONE_ROLES_FILE) - else: - roles_file = parsed_args.roles_file - return roles_file - - def _get_networks_file_path(self, parsed_args): - """Return networks_file for the deployment""" - if not parsed_args.networks_file: - return os.path.join(parsed_args.templates, - constants.STANDALONE_NETWORKS_FILE) - return parsed_args.networks_file - - def _get_primary_role_name(self, roles_file_path, templates): - """Return the primary role name""" - roles_data = utils.fetch_roles_file( - roles_file_path, templates) - if not roles_data: - return 'Standalone' - - for r in roles_data: - if 'tags' in r and 'primary' in r['tags']: - return r['name'] - self.log.warning('No primary role found in roles_data, using ' - 'first defined role') - return roles_data[0]['name'] - - def _create_persistent_dirs(self): - """Creates temporary working directories""" - utils.makedirs(constants.STANDALONE_EPHEMERAL_STACK_VSTATE) - - def _create_working_dirs(self, stack_name='standalone'): - """Creates temporary working directories""" - if self.output_dir: - utils.makedirs(self.output_dir) - if not self.tht_render: - self.tht_render = os.path.join(self.output_dir, - 'tripleo-heat-installer-templates') - # Clear dir since we're using a static name and shutils.copytree - # needs the folder to not exist. We'll generate the - # contents each time. This should clear the folder on the first - # run of this function. - shutil.rmtree(self.tht_render, ignore_errors=True) - if not self.tmp_ansible_dir: - self.tmp_ansible_dir = tempfile.mkdtemp( - prefix=stack_name + '-ansible-', dir=self.output_dir) - - def _populate_templates_dir(self, source_templates_dir, - stack_name='standalone'): - """Creates template dir with templates - - * Copy --templates content into a working dir - created as 'output_dir/tripleo-heat-installer-templates'. 
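# NOTE: _get_primary_role_name above selects the role tagged "primary" and
# falls back to the first defined role. The same selection reduced to a
# standalone function over already-loaded roles_data (illustrative name):

def primary_role_name(roles_data, default='Standalone'):
    if not roles_data:
        return default
    for role in roles_data:
        if 'primary' in role.get('tags', []):
            return role['name']
    return roles_data[0]['name']  # fall back to the first defined role

# primary_role_name([{'name': 'Standalone', 'tags': ['primary', 'controller']}])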
- - :param source_templates_dir: string to a directory containing our - source templates - """ - self._create_working_dirs(stack_name) - if not os.path.exists(source_templates_dir): - raise exceptions.NotFound("%s templates directory does not exist " - "or permission denied" % - source_templates_dir) - if not os.path.exists(self.tht_render): - shutil.copytree(source_templates_dir, self.tht_render, - symlinks=True) - - def _cleanup_working_dirs(self, cleanup=False, user=None): - """Cleanup temporary working directories - - :param cleanup: Set to true if you DO want to cleanup the dirs - """ - if cleanup: - if self.tht_render and os.path.exists(self.tht_render): - shutil.rmtree(self.tht_render, ignore_errors=True) - - self.tht_render = None - if self.tmp_ansible_dir and os.path.exists(self.tmp_ansible_dir): - shutil.rmtree(self.tmp_ansible_dir) - self.tmp_ansible_dir = None - else: - self.log.warning(_("Not cleaning working directory %s") - % self.tht_render) - # TODO(cjeanner) drop that once using oslo.privsep - self._set_data_rights(self.tht_render, user=user, mode=0o700) - self.log.warning(_("Not cleaning ansible directory %s") - % self.tmp_ansible_dir) - # TODO(cjeanner) drop that once using oslo.privsep - self._set_data_rights(self.tmp_ansible_dir, user=user, mode=0o700) - - def _configure_puppet(self): - self.log.info(_('Configuring puppet modules symlinks ...')) - utils.bulk_symlink(self.log, constants.TRIPLEO_PUPPET_MODULES, - constants.PUPPET_MODULES, - constants.PUPPET_BASE) - - def _update_passwords_env(self, output_dir, user, passwords=None, - stack_name='standalone'): - old_pw_file = os.path.join(constants.CLOUD_HOME_DIR, - 'tripleo-' + stack_name + '-passwords.yaml') - pw_file = os.path.join(output_dir, - 'tripleo-' + stack_name + '-passwords.yaml') - - # Generated passwords take the lowest precedence, allowing - # custom overrides - stack_env = {'parameter_defaults': {}} - stack_env['parameter_defaults'] = password_utils.generate_passwords( - stack_env=stack_env) - # Check for the existence of a passwords file in the old location. - if os.path.exists(old_pw_file): - self.log.warning("Migrating {} to {}.".format( - old_pw_file, pw_file)) - try: - os.rename(old_pw_file, pw_file) - except Exception as e: - self.log.error("Error moving {} to {}".format( - old_pw_file, pw_file)) - self.log.error(e) - raise e - if os.path.exists(pw_file): - with open(pw_file) as pf: - stack_env['parameter_defaults'].update( - yaml.safe_load(pf.read())['parameter_defaults']) - self.log.warning("Reading passwords from %s" % pw_file) - - if passwords: - # These passwords are the DefaultPasswords so we only - # update if they don't already exist in stack_env - for p, v in passwords.items(): - if p not in stack_env['parameter_defaults']: - stack_env['parameter_defaults'][p] = v - - # Write out the password file in yaml for heat. - # This contains sensitive data so ensure it's not world-readable - with open(pw_file, 'w') as pf: - yaml.safe_dump(stack_env, pf, default_flow_style=False) - # TODO(cjeanner) drop that once using oslo.privsep - # Do not forget to re-add os.chmod 0o600 on that one! 
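# NOTE: _update_passwords_env above layers three password sources: freshly
# generated values at the lowest precedence, then the existing passwords file,
# then DefaultPasswords only where a key is still missing. The merge logic in
# isolation (illustrative function and data):

def merge_passwords(generated, from_file, defaults):
    merged = dict(generated)
    merged.update(from_file)           # file contents override generated values
    for key, value in defaults.items():
        merged.setdefault(key, value)  # defaults never clobber existing keys
    return merged

# merge_passwords({'AdminPassword': 'new'}, {'AdminPassword': 'kept'},
#                 {'HAProxyStatsPassword': 'x'})
# -> {'AdminPassword': 'kept', 'HAProxyStatsPassword': 'x'}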
- self._set_data_rights(pw_file, user=user) - - return pw_file - - def _generate_hosts_parameters(self, parsed_args, p_ip): - hostname = utils.get_short_hostname() - domain = parsed_args.local_domain - - data = { - 'CloudName': p_ip, - 'CloudDomain': domain, - 'CloudNameInternal': '%s.internalapi.%s' % (hostname, domain), - 'CloudNameStorage': '%s.storage.%s' % (hostname, domain), - 'CloudNameStorageManagement': ('%s.storagemgmt.%s' - % (hostname, domain)), - 'CloudNameCtlplane': '%s.ctlplane.%s' % (hostname, domain), - } - return data - - def _ip_for_uri(self, ip_addr, ip_nw): - if ip_nw.version == 6: - return '[%s]' % ip_addr - return ip_addr - - def _generate_portmap_parameters(self, ip_addr, ip_nw, ctlplane_vip_addr, - public_vip_addr, stack_name='standalone', - role_name='Standalone'): - hostname = utils.get_short_hostname() - - # in order for deployed server network information to match correctly, - # we need to ensure the HostnameMap matches our hostname - hostname_map_name = "%s-%s-0" % (stack_name.lower(), role_name.lower()) - data = { - 'HostnameMap': { - hostname_map_name: '%s' % hostname - }, - # The settings below allow us to inject a custom public - # VIP. This requires use of the generated - # ../network/ports/external_from_pool.yaml resource in t-h-t. - 'IPPool': { - 'external': [public_vip_addr] - }, - 'ExternalNetCidr': '%s/%s' % (public_vip_addr, ip_nw.prefixlen), - # This requires use of the - # ../deployed-server/deployed-neutron-port.yaml resource in t-h-t - # We use this for the control plane VIP and the server IP itself - 'DeployedServerPortMap': { - ('%s-ctlplane' % hostname): { - 'fixed_ips': [{'ip_address': ip_addr}], - 'subnets': [{'cidr': str(ip_nw.cidr), - 'ip_version': ip_nw.version}], - 'network': {'tags': [str(ip_nw.cidr)]} - }, - 'control_virtual_ip': { - 'fixed_ips': [{'ip_address': ctlplane_vip_addr}], - 'subnets': [{'cidr': str(ip_nw.cidr), - 'ip_version': ip_nw.version}], - 'network': {'tags': [str(ip_nw.cidr)]} - }, - 'public_virtual_ip': { - 'fixed_ips': [{'ip_address': public_vip_addr}], - 'subnets': [{'cidr': str(ip_nw.cidr), - 'ip_version': ip_nw.version}], - 'network': {'tags': [str(ip_nw.cidr)]} - } - }, - 'NodePortMap': { - hostname: { - 'ctlplane': { - 'ip_address': ip_addr, - 'ip_address_uri': self._ip_for_uri(ip_addr, ip_nw), - 'ip_subnet': '%s/%s' % (ip_addr, ip_nw.prefixlen) - } - } - }, - 'ControlPlaneVipData': { - 'fixed_ips': [ - {'ip_address': ctlplane_vip_addr} - ], - 'name': 'control_virtual_ip', - 'network': { - 'tags': ['%s/%s' % (ctlplane_vip_addr, ip_nw.prefixlen)] - }, - 'subnets': [ - {'ip_version': ip_nw.version} - ] - }, - 'VipPortMap': { - 'external': { - 'ip_address': public_vip_addr, - 'ip_address_uri': self._ip_for_uri(public_vip_addr, ip_nw), - 'ip_subnet': '%s/%s' % (public_vip_addr, ip_nw.prefixlen) - } - } - } - return data - - def _kill_heat(self, parsed_args): - """Tear down heat installer and temp files - - Kill the heat launcher/installer process. - Teardown temp files created in the deployment process, - when cleanup is requested. - - """ - if self.heat_pid: - self.heat_launch.kill_heat(self.heat_pid) - pid, ret = os.waitpid(self.heat_pid, 0) - self.heat_pid = None - - def _launch_heat(self, parsed_args, output_dir): - # we do this as root to chown config files properly for docker, etc. 
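# NOTE: _generate_portmap_parameters above relies on _ip_for_uri to make
# addresses safe for URIs: IPv6 literals need brackets, IPv4 does not. The
# check with netaddr, which the surrounding code already imports:

import netaddr

def ip_for_uri(ip_addr):
    if netaddr.IPAddress(ip_addr).version == 6:
        return '[%s]' % ip_addr
    return ip_addr

assert ip_for_uri('192.0.2.1') == '192.0.2.1'
assert ip_for_uri('2001:db8::1') == '[2001:db8::1]'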
- heat_launcher_path = os.path.join(output_dir, 'heat_launcher') - - if parsed_args.heat_user: - heat_user = parsed_args.heat_user - else: - heat_user = parsed_args.deployment_user - - if parsed_args.heat_native is not None and \ - parsed_args.heat_native.lower() == "false": - self.heat_launch = heat_launcher.HeatContainerLauncher( - api_port=parsed_args.heat_api_port, - all_container_image=parsed_args.heat_container_image, - user=heat_user, - heat_dir=heat_launcher_path) - else: - self.heat_launch = heat_launcher.HeatNativeLauncher( - api_port=parsed_args.heat_api_port, - user=heat_user, - heat_dir=heat_launcher_path, - use_root=True) - - # NOTE(dprince): we launch heat with fork exec because - # we don't want it to inherit our args. Launching heat - # as a "library" would be cool... but that would require - # more refactoring. It runs a single process and we kill - # it always below. - self.heat_pid = os.fork() - if self.heat_pid == 0: - if parsed_args.heat_native is not None and \ - parsed_args.heat_native.lower() == "true": - try: - uid = pwd.getpwnam(heat_user).pw_uid - gid = pwd.getpwnam(heat_user).pw_gid - except KeyError: - msg = _( - "Please create a %s user account before " - "proceeding.") % heat_user - self.log.error(msg) - raise exceptions.DeploymentError(msg) - os.setgid(gid) - os.setuid(uid) - self.heat_launch.heat_db_sync() - # Exec() never returns. - self.heat_launch.launch_heat() - - # NOTE(dprince): we use our own client here because we set - # auth_required=False above because keystone isn't running when this - # command starts - tripleoclients = self.app.client_manager.tripleoclient - orchestration_client = \ - tripleoclients.local_orchestration(parsed_args.heat_api_port) - - return orchestration_client - - def _normalize_user_templates(self, user_tht_root, tht_root, env_files=[]): - """copy environment files into tht render path - - This assumes any env file that includes user_tht_root has already - been copied into tht_root. - - :param user_tht_root: string path to the user's template dir - :param tht_root: string path to our deployed tht_root - :param env_files: list of paths to environment files - :return list of absolute pathed environment files that exist in - tht_root - """ - environments = [] - # normalize the user template path to ensure it doesn't have a trailing - # slash - user_tht = os.path.abspath(user_tht_root) - for env_path in env_files: - self.log.debug("Processing file %s" % env_path) - abs_env_path = os.path.abspath(env_path) - if (abs_env_path.startswith(user_tht_root) and - ((user_tht + '/') in env_path or - (user_tht + '/') in abs_env_path or - user_tht == abs_env_path or - user_tht == env_path)): - # file is in tht and will be copied, so just update path - new_env_path = env_path.replace(user_tht + '/', - tht_root + '/') - self.log.debug("Redirecting %s to %s" - % (abs_env_path, new_env_path)) - environments.append(new_env_path) - elif abs_env_path.startswith(tht_root): - self.log.debug("File already in tht_root %s") - environments.append(abs_env_path) - else: - self.log.debug("File outside of tht_root %s, copying in") - # file is outside of THT, just copy it in - # TODO(aschultz): probably shouldn't be flattened? 
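# NOTE: _normalize_user_templates above rewrites environment-file paths from
# the user's template tree into the rendered working tree, copying in anything
# that lives outside it. The core rewrite as a rough standalone sketch
# (copying and collision checks elided; function name is illustrative):

import os

def redirect_env_path(env_path, user_tht_root, tht_root):
    user_tht = os.path.abspath(user_tht_root)
    abs_env = os.path.abspath(env_path)
    if abs_env.startswith(user_tht + '/'):
        # File is inside the user tree and will be copied: only fix the prefix.
        return abs_env.replace(user_tht + '/', tht_root + '/', 1)
    return abs_env  # outside the user tree: the real code copies it into tht_root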
- target_dest = os.path.join(tht_root, - os.path.basename(abs_env_path)) - if os.path.exists(target_dest): - raise exceptions.DeploymentError("%s already exists, " - "please rename the " - "file to something else" - % target_dest) - shutil.copy(abs_env_path, tht_root) - environments.append(target_dest) - return environments - - def _load_user_params(self, user_environments): - user_params = {} - for env_file in user_environments: - # undercloud heat stack virtual state tracking is not available yet - if env_file.endswith('-stack-vstate-dropin.yaml'): - continue - - with open(env_file, 'r') as f: - data = yaml.safe_load(f.read()) - - if data is None or data.get('parameter_defaults') is None: - continue - - for k, v in data.get('parameter_defaults', {}).items(): - user_params[k] = v - - return user_params - - def _setup_heat_environments(self, roles_file_path, networks_file_path, - parsed_args): - """Process tripleo heat templates with jinja and deploy into work dir - - * Process j2/install additional templates there - * Return the environments list for futher processing as a new base. - - The first two items are reserved for the - overcloud-resource-registry-puppet.yaml and passwords files. - """ - - self.log.warning(_("** Handling template files **")) - env_files = [] - - # TODO(aschultz): in overcloud deploy we have a --environments-dir - # we might want to handle something similar for this - # (shardy) alternatively perhaps we should rely on the plan-environment - # environments list instead? - if parsed_args.environment_files: - env_files.extend(parsed_args.environment_files) - - # ensure any user provided templates get copied into tht_render - user_environments = self._normalize_user_templates( - parsed_args.templates, self.tht_render, env_files) - - # generate jinja templates by its work dir location - self.log.debug(_("Using roles file %s") % roles_file_path) - utils.jinja_render_files(self.log, - templates=parsed_args.templates, - working_dir=self.tht_render, - roles_file=roles_file_path, - networks_file=networks_file_path, - output_dir=self.tht_render) - - # NOTE(aschultz): the next set of environment files are system included - # so we have to include them at the front of our environment list so a - # user can override anything in them. 
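# NOTE: _load_user_params above folds the parameter_defaults of every user
# environment file into a single dict, later files winning; the real method
# also skips the generated *-stack-vstate-dropin.yaml file. A compact
# equivalent (assumes PyYAML):

import yaml

def load_user_params(env_files):
    user_params = {}
    for path in env_files:
        with open(path) as f:
            data = yaml.safe_load(f) or {}
        user_params.update(data.get('parameter_defaults') or {})
    return user_params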
- - environments = [os.path.join(self.tht_render, - constants.DEFAULT_RESOURCE_REGISTRY)] - - # this will allow the user to overwrite passwords with custom envs - # or pick instack legacy passwords as is, if upgrading from instack - pw_file = self._update_passwords_env( - output_dir=self.output_dir, - user=parsed_args.deployment_user, - stack_name=parsed_args.stack.lower(), - ) - environments.append(pw_file) - - self.log.info(_("Deploying templates in the directory {0}").format( - os.path.abspath(self.tht_render))) - - maps_file = os.path.join(self.tht_render, - 'tripleoclient-hosts-portmaps.yaml') - ip_nw = netaddr.IPNetwork(parsed_args.local_ip) - ip = str(ip_nw.ip) - - if parsed_args.control_virtual_ip: - c_ip = parsed_args.control_virtual_ip - else: - c_ip = ip - - if parsed_args.public_virtual_ip: - p_ip = parsed_args.public_virtual_ip - else: - p_ip = ip - ip_version = str(ip_nw.version) - - role_name = self._get_primary_role_name( - roles_file_path, parsed_args.templates) - tmp_env = self._generate_hosts_parameters(parsed_args, p_ip) - tmp_env.update(self._generate_portmap_parameters( - ip, ip_nw, c_ip, p_ip, - stack_name=parsed_args.stack, - role_name=role_name)) - - user_params = self._load_user_params(user_environments) - host_routes = user_params.get('ControlPlaneStaticRoutes', []) - mtu = user_params.get('InterfaceLocalMtu', 1500) - redis_vip = user_params.get( - 'RedisVirtualFixedIPs', - [{'ip_address': c_ip, 'use_neutron': False}]) - ovn_dbs_vip = user_params.get( - 'OVNDBsVirtualFixedIPs', - [{'ip_address': c_ip, 'use_neutron': False}]) - - ovn_static_bridge_mac_map = user_params.get( - 'OVNStaticBridgeMacMappings', {}) - if not ovn_static_bridge_mac_map: - ovn_bridge_macs = ovn_static_bridge_mac_map.setdefault( - utils.get_short_hostname(), {}) - # NOTE: Hard coding the THT default for NeutronBridgeMappings - # unless user provided an override. - bridge_mappings = user_params.get('NeutronBridgeMappings', - ['datacentre:br-ex']) - # Handle heat comma_delimited_list - if isinstance(bridge_mappings, str) and bridge_mappings: - bridge_mappings = bridge_mappings.split(',') - physnets = [bridge.split(':')[0] for bridge in bridge_mappings] - for idx, physnet in enumerate(physnets): - ovn_bridge_macs[physnet] = 'fa:16:3a:00:53:{:02X}'.format(idx) - - tmp_env.update( - { - 'RedisVirtualFixedIPs': redis_vip, - 'OVNDBsVirtualFixedIPs': ovn_dbs_vip, - 'OVNStaticBridgeMacMappings': ovn_static_bridge_mac_map, - 'CtlplaneNetworkAttributes': { - 'network': { - 'mtu': mtu, - }, - 'subnets': { - 'ctlplane-subnet': { - 'cidr': str(ip_nw.cidr), - 'host_routes': host_routes, - 'ip_version': ip_version, - } - } - } - } - ) - - with open(maps_file, 'w') as env_file: - yaml.safe_dump({'parameter_defaults': tmp_env}, env_file, - default_flow_style=False) - environments.append(maps_file) - - # NOTE(aschultz): this doesn't get copied into tht_root but - # we always include the hieradata override stuff last. 
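# NOTE: The OVNStaticBridgeMacMappings fallback above synthesizes one locally
# administered MAC address per physnet parsed from NeutronBridgeMappings.
# That generation step in isolation:

def ovn_bridge_macs(bridge_mappings=('datacentre:br-ex',)):
    physnets = [bridge.split(':')[0] for bridge in bridge_mappings]
    return {physnet: 'fa:16:3a:00:53:{:02X}'.format(idx)
            for idx, physnet in enumerate(physnets)}

assert ovn_bridge_macs() == {'datacentre': 'fa:16:3a:00:53:00'}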
- if parsed_args.hieradata_override: - environments.append(self._process_hieradata_overrides( - parsed_args.hieradata_override, - parsed_args.standalone_role, - parsed_args.stack.lower())) - - # Create a persistent drop-in file to indicate the stack - # virtual state changes - stack_vstate_dropin = os.path.join(self.tht_render, - '%s-stack-vstate-dropin.yaml' % - parsed_args.stack) - with open(stack_vstate_dropin, 'w') as dropin_file: - yaml.safe_dump( - {'parameter_defaults': { - 'RootStackName': parsed_args.stack.lower(), - 'DeployIdentifier': int(time.time())}}, - dropin_file, default_flow_style=False) - environments.append(stack_vstate_dropin) - - return environments + user_environments - - def _prepare_container_images(self, env, roles_data): - image_params = kolla_builder.container_images_prepare_multi( - env, roles_data, dry_run=True) - - # use setdefault to ensure every needed image parameter is - # populated without replacing user-set values - if image_params: - pd = env.get('parameter_defaults', {}) - for k, v in image_params.items(): - pd.setdefault(k, v) - - def _deploy_tripleo_heat_templates(self, orchestration_client, - parsed_args): - """Deploy the fixed templates in TripleO Heat Templates""" - roles_file_path = self._get_roles_file_path(parsed_args) - networks_file_path = self._get_networks_file_path(parsed_args) - - # sets self.tht_render to the working dir with deployed templates - environments = self._setup_heat_environments( - roles_file_path, networks_file_path, parsed_args) - - # rewrite paths to consume t-h-t env files from the working dir - self.log.debug(_("Processing environment files %s") % environments) - env_files, env = utils.process_multiple_environments( - environments, self.tht_render, parsed_args.templates, - cleanup=parsed_args.cleanup) - - # check if we're trying to deploy ceph during the overcloud deployment - utils.check_deployed_ceph_stage(env) - - # check network plugin with undercloud upgrade - if parsed_args.upgrade and self._is_undercloud_deploy(parsed_args): - utils.check_network_plugin(parsed_args.output_dir, env) - - roles_data = utils.fetch_roles_file( - roles_file_path, parsed_args.templates) - - parameter_defaults = env.get('parameter_defaults', {}) - enabled_service_map = kolla_builder.get_enabled_services( - env, roles_data) - if enabled_service_map: - parameter_defaults.update(enabled_service_map) - - if not parsed_args.disable_container_prepare: - self._prepare_container_images(env, roles_data) - parameters.convert_docker_params(env) - - self.log.debug(_("Getting template contents")) - template_path = os.path.join(self.tht_render, 'overcloud.yaml') - template_files, template = \ - template_utils.get_template_contents(template_path) - - files = dict(list(template_files.items()) + list(env_files.items())) - - stack_name = parsed_args.stack - - self.log.debug(_("Deploying stack: %s") % stack_name) - self.log.debug(_("Deploying template: %s") % template) - self.log.debug(_("Deploying environment: %s") % env) - self.log.debug(_("Deploying files: %s") % files) - - stack_args = { - 'stack_name': stack_name, - 'template': template, - 'environment': env, - 'files': files, - } - - if parsed_args.timeout: - stack_args['timeout_mins'] = parsed_args.timeout - - self.log.warning(_("** Performing Heat stack create.. 
**")) - stack = orchestration_client.stacks.create(**stack_args) - if not stack: - msg = _('The ephemeral Heat stack could not be created, please ' - 'check logs in /var/log/heat-launcher and/or any ' - 'possible misconfiguration.') - raise exceptions.DeploymentError(msg) - - stack_id = stack['stack']['id'] - return "%s/%s" % (stack_name, stack_id) - - def _download_ansible_playbooks(self, client, stack_name, - tripleo_role_name='Standalone', - python_interpreter=sys.executable): - stack_config = config.Config(client) - self._create_working_dirs(stack_name.lower()) - - self.log.warning(_('** Downloading {0} ansible.. **').format( - stack_name)) - # python output buffering is making this seem to take forever.. - sys.stdout.flush() - stack_config.download_config(stack_name, self.tmp_ansible_dir) - - inventory = TripleoInventory( - hclient=client, - plan_name=stack_name, - ansible_ssh_user='root') - - inv_path = os.path.join(self.tmp_ansible_dir, 'inventory.yaml') - extra_vars = { - tripleo_role_name: { - 'ansible_connection': 'local', - 'ansible_python_interpreter': python_interpreter, - } - } - - inventory.write_static_inventory(inv_path, extra_vars) - # Move inventory in output_dir in order to be reusable by users: - shutil.copyfile(inv_path, - os.path.join(self.output_dir, - constants.TRIPLEO_STATIC_INVENTORY)) - # copy inventory file to Runner friendly path - shutil.copyfile(inv_path, os.path.join(self.tmp_ansible_dir, - 'inventory', 'tripleo')) - - self.log.info(_('** Downloaded {0} ansible to {1} **').format( - stack_name, self.tmp_ansible_dir)) - sys.stdout.flush() - return self.tmp_ansible_dir - - def _download_stack_outputs(self, client, stack_name): - stack = utils.get_stack(client, stack_name) - output_file = 'tripleo-{}-outputs.yaml'.format(stack_name) - endpointmap_file = os.path.join(self.output_dir, output_file) - - outputs = {} - endpointmap = utils.get_endpoint_map(self.output_dir) - if endpointmap: - outputs['EndpointMapOverride'] = endpointmap - - allnodescfg = utils.get_stack_output_item(stack, 'AllNodesConfig') - if allnodescfg: - outputs['AllNodesExtraMapData'] = allnodescfg - - hosts = utils.get_stack_output_item(stack, 'HostsEntry') - if hosts: - outputs['ExtraHostFileEntries'] = hosts - - self._create_working_dirs(stack_name.lower()) - output = {'parameter_defaults': outputs} - with open(endpointmap_file, 'w') as f: - yaml.safe_dump(output, f, default_flow_style=False) - return output - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - parser.add_argument( - '--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES, - help=_("The directory containing the Heat templates to deploy"), - default=constants.TRIPLEO_HEAT_TEMPLATES - ) - parser.add_argument('--upgrade', default=False, action='store_true', - help=_("Upgrade an existing deployment.")) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_("Skip yes/no prompt (assume yes).")) - parser.add_argument('--stack', - help=_("Name for the ephemeral (one-time create " - "and forget) heat stack."), - default='standalone') - parser.add_argument('--output-dir', - dest='output_dir', - help=_("Directory to output state, processed heat " - "templates, ansible deployment files.\n" - "Defaults to ~/tripleo-deploy/")) - parser.add_argument('--output-only', - dest='output_only', - action='store_true', - default=False, - help=_("Do not execute the Ansible playbooks. 
By" - " default the playbooks are saved to the" - " output-dir and then executed.")), - parser.add_argument('--standalone-role', default='Standalone', - help=_("The role to use for standalone " - "configuration when populating the " - "deployment actions.")) - parser.add_argument('-t', '--timeout', metavar='', - type=int, default=30, - help=_('Deployment timeout in minutes.')) - parser.add_argument( - '-e', '--environment-file', metavar='', - action='append', dest='environment_files', - help=_('Environment files to be passed to the heat stack-create ' - 'or heat stack-update command. (Can be specified more than ' - 'once.)') - ) - parser.add_argument( - '--roles-file', '-r', dest='roles_file', - help=_( - 'Roles file, overrides the default %s in the t-h-t templates ' - 'directory used for deployment. May be an ' - 'absolute path or the path relative to the templates dir.' - ) % constants.STANDALONE_ROLES_FILE - ) - parser.add_argument( - '--networks-file', '-n', dest='networks_file', - help=_( - 'Roles file, overrides the default %s in the t-h-t templates ' - 'directory used for deployment. May be an ' - 'absolute path or the path relative to the templates dir.' - ) % constants.STANDALONE_NETWORKS_FILE - ) - parser.add_argument( - '--plan-environment-file', '-p', - help=_('DEPRECATED: Plan Environment file, Not supported') - ) - parser.add_argument( - '--heat-api-port', metavar='', - dest='heat_api_port', - default='8006', - help=_('Heat API port to use for the installers private' - ' Heat API instance. Optional. Default: 8006.)') - ) - parser.add_argument( - '--heat-user', metavar='', - dest='heat_user', - help=_('User to execute the non-privileged heat-all process. ' - 'Defaults to the value of --deployment-user.') - ) - # TODO(cjeanner) drop that once using oslo.privsep - parser.add_argument( - '--deployment-user', - dest='deployment_user', - default=os.environ.get('SUDO_USER', 'stack'), - help=_('User who executes the tripleo deploy command. ' - 'Defaults to $SUDO_USER. If $SUDO_USER is unset ' - 'it defaults to stack.') - ) - parser.add_argument('--deployment-python-interpreter', default=None, - help=_('The path to python interpreter to use for ' - 'the deployment actions. If not specified ' - 'the python version of the openstackclient ' - 'will be used. This may need to be used ' - 'if deploying on a python2 host from a ' - 'python3 system or vice versa.')) - parser.add_argument( - '--heat-container-image', metavar='', - dest='heat_container_image', - default=constants.DEFAULT_HEAT_CONTAINER, - help=_('The container image to use when launching the heat-all ' - 'process. Defaults to: {}'.format( - constants.DEFAULT_HEAT_CONTAINER)) - ) - parser.add_argument( - '--heat-native', - dest='heat_native', - nargs='?', - default=None, - const="true", - help=_('Execute the heat-all process natively on this host. ' - 'This option requires that the heat-all binaries ' - 'be installed locally on this machine. ' - 'This option is enabled by default which means heat-all is ' - 'executed on the host OS directly.') - ) - parser.add_argument( - '--local-ip', metavar='', - dest='local_ip', - help=_('Local IP/CIDR for standalone traffic. Required.') - ) - parser.add_argument( - '--control-virtual-ip', metavar='', - dest='control_virtual_ip', - help=_('Control plane VIP. This allows the standalone installer ' - 'to configure a custom VIP on the control plane.') - ) - parser.add_argument( - '--public-virtual-ip', metavar='', - dest='public_virtual_ip', - help=_('Public nw VIP. 
This allows the standalone installer ' - 'to configure a custom VIP on the public (external) NW.') - ) - parser.add_argument( - '--local-domain', metavar='', - dest='local_domain', - default='localdomain', - help=_('Local domain for standalone cloud and its API endpoints') - ) - parser.add_argument( - '--cleanup', - action='store_true', default=False, - help=_('Cleanup temporary files. Using this flag will ' - 'remove the temporary files used during deployment in ' - 'after the command is run.'), - - ) - parser.add_argument( - '--hieradata-override', nargs='?', - help=_('Path to hieradata override file. When it points to a heat ' - 'env file, it is passed in t-h-t via --environment-file. ' - 'When the file contains legacy instack data, ' - 'it is wrapped with ExtraConfig and also ' - 'passed in for t-h-t as a temp file created in ' - '--output-dir. Note, instack hiera data may be ' - 'not t-h-t compatible and will highly likely require a ' - 'manual revision.') - ) - parser.add_argument( - '--keep-running', - action='store_true', - default=False, - help=_('Keep the ephemeral Heat running after the stack operation ' - 'is complete. This is for debugging purposes only. ' - 'The ephemeral Heat can be used by openstackclient with:\n' - 'OS_AUTH_TYPE=none ' - 'OS_ENDPOINT=http://127.0.0.1:8006/v1/admin ' - 'openstack stack list\n' - 'where 8006 is the port specified by --heat-api-port.') - ) - parser.add_argument( - '--preflight-validations', - action='store_true', - default=False, - dest='preflight', - help=_('Activate pre-flight validations before starting ' - 'the actual deployment process.') - ) - parser.add_argument( - '--inflight-validations', - action='store_true', - default=False, - dest='inflight', - help=_('Activate in-flight validations during the deploy. ' - 'In-flight validations provide a robust way to ensure ' - 'deployed services are running right after their ' - 'activation. Defaults to False.') - ) - parser.add_argument( - '--transport', - action='store', - default='local', - help=_('Transport mechanism to use for ansible.' - 'Use "ssh" for multinode deployments. ' - 'Use "local" for standalone deployments. ' - 'Defaults to "local".') - ) - parser.add_argument( - '--ansible-forks', - action='store', - default=None, - type=int, - help=_('The number of Ansible forks to use for the' - ' config-download ansible-playbook command.') - ) - parser.add_argument( - '--disable-container-prepare', - action='store_true', - default=False, - help=_('Disable the container preparation actions to prevent ' - 'container tags from being updated and new containers ' - 'from being fetched. If you skip this but do not have ' - 'the container parameters configured, the deployment ' - 'action may fail.') - ) - parser.add_argument( - '--reproduce-command', - action='store_true', - default=False, - help=_('Create a reproducer command with ansible command' - 'line and all environments variables.') - ) - - stack_action_group = parser.add_mutually_exclusive_group() - - stack_action_group.add_argument( - '--force-stack-update', - dest='force_stack_update', - action='store_true', - default=False, - help=_("DEPRECATED: Do a virtual update of the ephemeral " - "heat stack (it cannot take real updates). " - "New or failed deployments " - "always have the stack_action=CREATE. This " - "option enforces stack_action=UPDATE. 
Not Supported."), - ) - stack_action_group.add_argument( - '--force-stack-create', - dest='force_stack_create', - action='store_true', - default=False, - help=_("DEPRECATED: Do a virtual create of the ephemeral " - "heat stack. New or failed deployments " - "always have the stack_action=CREATE. This " - "option enforces stack_action=CREATE. Not Supported."), - ) - return parser - - def _process_hieradata_overrides(self, override_file=None, - tripleo_role_name='Standalone', - stack_name='standalone'): - """Process hiera data overrides, including legacy formats - - Return a file name that points to the processed hiera data overrides file - """ - if not override_file or not os.path.exists(override_file): - # we should never get here because there's a check in - # undercloud_conf but stranger things have happened. - msg = (_('hieradata_override file could not be found: %s') % - override_file) - self.log.error(msg) - raise exceptions.DeploymentError(msg) - - target = override_file - with open(target, 'rb') as fb: - data = fb.read() - if not data.strip(): - # since an empty file isn't valid yaml, let's be more specific - msg = (_("hieradata override file (%s) cannot be empty") % target) - self.log.error(msg) - raise exceptions.DeploymentError(msg) - - hiera_data = yaml.safe_load(data) - if not hiera_data: - msg = (_('Unsupported data format in hieradata override %s') % - target) - self.log.error(msg) - raise exceptions.DeploymentError(msg) - self._create_working_dirs(stack_name) - - # NOTE(bogdando): In t-h-t, hiera data should come in wrapped as - # {parameter_defaults: {StandaloneExtraConfig: ... }} - extra_config_var = '%sExtraConfig' % tripleo_role_name - if (extra_config_var not in hiera_data.get('parameter_defaults', {})): - hiera_override_file = os.path.join( - self.tht_render, 'tripleo-hieradata-override.yaml') - self.log.info('Converting hiera overrides for t-h-t from ' - 'legacy format into a file %s' % - hiera_override_file) - with open(hiera_override_file, 'w') as override: - yaml.safe_dump( - {'parameter_defaults': { - extra_config_var: hiera_data}}, - override, - default_flow_style=False) - target = hiera_override_file - return target - - def _dump_ansible_errors(self, f, name): - if not os.path.isfile(f): - return - - failures = None - with open(f, 'r') as ff: - try: - failures = json.load(ff) - except (json.JSONDecodeError, TypeError) as ex: - self.log.error(_( - 'Could not read ansible errors from file {}.\n' - 'Encountered {}').format( - f, - ex)) - - if not failures or not failures.get(name, {}): - return - - self.log.error(_('** Found ansible errors for %s deployment! **') % - name) - self.log.error(json.dumps(failures.get(name, {}), indent=1)) - - def _standalone_deploy(self, parsed_args): - extra_env_var = dict() - - if self._is_undercloud_deploy(parsed_args): - extra_env_var['ANSIBLE_LOG_PATH'] = os.path.join( - parsed_args.output_dir, constants.UNDERCLOUD_LOG_FILE) - - if not parsed_args.local_ip: - msg = _('Please set --local-ip to the correct ' - 'IP address/CIDR for this machine.') - self.log.error(msg) - raise exceptions.DeploymentError(msg) - - if not os.environ.get('HEAT_API_PORT'): - os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port - - # The main thread runs as root and we drop privs for forked - # processes below. Only the heat deploy/os-collect-config forked - # process runs as root. 
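(Illustrative sketch, not part of the deleted module: the legacy-format wrapping that _process_hieradata_overrides above performs, assuming only PyYAML and an example role name.)

    import yaml

    # Legacy instack-style hiera data, as it might appear in an override file.
    hiera_data = yaml.safe_load('nova::compute::libvirt::foo: bar\n')
    # Wrap it the way t-h-t expects: {parameter_defaults: {<Role>ExtraConfig: ...}}
    wrapped = {'parameter_defaults': {'StandaloneExtraConfig': hiera_data}}
    print(yaml.safe_dump(wrapped, default_flow_style=False))
    # parameter_defaults:
    #   StandaloneExtraConfig:
    #     nova::compute::libvirt::foo: bar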
- if os.geteuid() != 0: - msg = _("Please run as root.") - self.log.error(msg) - raise exceptions.DeploymentError(msg) - - self._run_preflight_checks(parsed_args) - - output_dir = utils.get_output_dir(parsed_args.output_dir, - parsed_args.stack) - - self.output_dir = os.path.abspath(output_dir) - - self._create_working_dirs(parsed_args.stack.lower()) - # The state that needs to be persisted between serial deployments - # and cannot be contained in ephemeral heat stacks or working dirs - self._create_persistent_dirs() - - # configure puppet - self._configure_puppet() - - # copy the templates dir in place - self._populate_templates_dir(parsed_args.templates, - parsed_args.stack.lower()) - - is_complete = False - try: - # Launch heat. - orchestration_client = self._launch_heat(parsed_args, output_dir) - # Wait for heat to be ready. - utils.wait_api_port_ready(parsed_args.heat_api_port) - # Deploy TripleO Heat templates. - stack_id = \ - self._deploy_tripleo_heat_templates(orchestration_client, - parsed_args) - - # Wait for completion. - status = utils.wait_for_stack_ready(orchestration_client, stack_id, - nested_depth=6) - if not status: - message = _("Stack create failed") - self.log.error(message) - raise exceptions.DeploymentError(message) - - # download the ansible playbooks and execute them. - depl_python = utils.get_deployment_python_interpreter(parsed_args) - self.ansible_dir = \ - self._download_ansible_playbooks(orchestration_client, - parsed_args.stack, - parsed_args.standalone_role, - depl_python) - - # output a file with EndpointMapOverride for use with other stacks - self._download_stack_outputs(orchestration_client, - parsed_args.stack) - - # Do not override user's custom ansible configuration file, - # it may have been pre-created with the tripleo CLI, or the like - ansible_config = os.path.join(self.output_dir, 'ansible.cfg') - if not os.path.isfile(ansible_config): - self.log.warning( - _('Generating default ansible config file %s') % - ansible_config) - # FIXME(bogdando): unhardcode key for future - # multi-node - ansible.write_default_ansible_cfg( - self.ansible_dir, - parsed_args.deployment_user, - ssh_private_key=None, - transport=parsed_args.transport) - else: - self.log.warning( - _('Using the existing %s for deployment') % ansible_config) - shutil.copy(ansible_config, self.ansible_dir) - - extra_args = dict() - if not parsed_args.inflight: - extra_args = {'skip_tags': 'opendev-validation'} - # Kill heat, we're done with it now. 
- if not parsed_args.keep_running: - self._kill_heat(parsed_args) - if not parsed_args.output_only: - operations = list() - if parsed_args.upgrade: - # Run Upgrade tasks before the deployment - operations.append( - constants.DEPLOY_ANSIBLE_ACTIONS['upgrade'] - ) - operations.append( - constants.DEPLOY_ANSIBLE_ACTIONS['deploy'] - ) - if parsed_args.upgrade: - # Run Post Upgrade tasks after the deployment - operations.append( - constants.DEPLOY_ANSIBLE_ACTIONS['post-upgrade'] - ) - # Run Online Upgrade tasks after the deployment - operations.append( - constants.DEPLOY_ANSIBLE_ACTIONS['online-upgrade'] - ) - with utils.Pushd(self.ansible_dir): - for operation in operations: - for k, v in extra_args.items(): - if k in operation: - operation[k] = ','.join([operation[k], v]) - else: - operation[k] = v - utils.run_ansible_playbook( - inventory=os.path.join( - self.ansible_dir, - 'inventory' - ), - workdir=self.ansible_dir, - verbosity=utils.playbook_verbosity(self=self), - extra_env_variables=extra_env_var, - forks=parsed_args.ansible_forks, - reproduce_command=parsed_args.reproduce_command, - **operation) - is_complete = True - finally: - if not parsed_args.keep_running: - self._kill_heat(parsed_args) - tar_filename = \ - utils.archive_deploy_artifacts( - self.log, - parsed_args.stack.lower(), - self.output_dir) - - if self.ansible_dir: - self._dump_ansible_errors( - os.path.join(self.ansible_dir, - tc_constants.ANSIBLE_ERRORS_FILE), - parsed_args.stack) - self._cleanup_working_dirs( - cleanup=parsed_args.cleanup, - user=parsed_args.deployment_user - ) - self._set_data_rights( - os.path.join(constants.CLOUD_HOME_DIR, '.tripleo'), - user=parsed_args.deployment_user, - mode=0o700) - if tar_filename: - self.log.warning('Install artifact is located at %s' % - tar_filename) - if not is_complete: - self.log.error(DEPLOY_FAILURE_MESSAGE.format( - self.heat_launch.install_dir - )) - else: - # We only get here if no errors - if parsed_args.output_only: - success_messaging = OUTPUT_ONLY_COMPLETION_MESSAGE - else: - success_messaging = DEPLOY_COMPLETION_MESSAGE - - if not self._is_undercloud_deploy(parsed_args): - success_messaging = success_messaging + \ - STANDALONE_COMPLETION_MESSAGE.format( - '~/.config/openstack/clouds.yaml') - - self.log.warning(success_messaging) - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - if parsed_args.deployment_user == 'root': - self.log.warning( - _("[WARNING] Deployment user is set to 'root'. This may cause " - "some deployment files to be located in /root. Please use " - "--deployment-user to specify the user you are deploying " - "with.")) - try: - self._standalone_deploy(parsed_args) - except Exception as ex: - self.log.error("Exception: %s" % str(ex)) - self.log.error(traceback.format_exc()) - raise exceptions.DeploymentError(str(ex)) - finally: - # Copy clouds.yaml from /etc/openstack so credentials can be - # read by the deployment user and not only root. - utils.copy_clouds_yaml(parsed_args.deployment_user) - - # send erase sequence to reset the cmdline if ansible - # mangled some escape sequences - utils.reset_cmdline() diff --git a/tripleoclient/v1/tripleo_launch_heat.py b/tripleoclient/v1/tripleo_launch_heat.py deleted file mode 100644 index 8e8a067ec..000000000 --- a/tripleoclient/v1/tripleo_launch_heat.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import getpass -import logging -import os - -from osc_lib.i18n import _ - -from tripleoclient import command -from tripleoclient.constants import (DEFAULT_EPHEMERAL_HEAT_CONTAINER, - DEFAULT_EPHEMERAL_HEAT_API_CONTAINER, - DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER) -from tripleoclient import exceptions -from tripleoclient import utils - - -class LaunchHeat(command.Command): - """Launch ephemeral Heat process.""" - - log = logging.getLogger("tripleoclient") - auth_required = False - heat_pid = None - - def _kill_heat(self, parsed_args): - """Tear down heat installer and temp files - - Kill the heat launcher/installer process. - Teardown temp files created in the deployment process, - when cleanup is requested. - - """ - if parsed_args.heat_type == "native": - self.log.info("Attempting to kill ephemeral heat") - if self.heat_pid: - self.log.info("Using heat pid: %s" % self.heat_pid) - self.heat_launcher.kill_heat(self.heat_pid) - pid, ret = os.waitpid(self.heat_pid, 0) - self.heat_pid = None - else: - self.log.info("No heat pid set, can't kill.") - - return 0 - - def _launch_heat(self, parsed_args): - self.log.info("Launching Heat %s" % parsed_args.heat_type) - utils.launch_heat(self.heat_launcher, parsed_args.restore_db) - return 0 - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--heat-api-port', metavar='', - dest='heat_api_port', - default='8006', - help=_('Heat API port to use for the installers private' - ' Heat API instance. Optional.') - ) - parser.add_argument( - '--heat-user', metavar='', - dest='heat_user', - default=getpass.getuser(), - help=_('User to execute the non-privileged heat-all process. ' - 'Defaults to current user. ' - 'If the configuration files /etc/heat/heat.conf or ' - '/usr/share/heat/heat-dist.conf exist, the user ' - 'must have read access to those files.\n' - 'This option is ignored when using --heat-type=container ' - 'or --heat-type=pod') - ) - parser.add_argument( - '--heat-container-image', metavar='', - dest='heat_container_image', - default=DEFAULT_EPHEMERAL_HEAT_CONTAINER, - help=_('The container image to use when launching the heat-all ' - 'process. Defaults to: {}'.format( - DEFAULT_EPHEMERAL_HEAT_CONTAINER)) - ) - parser.add_argument( - '--heat-container-api-image', - metavar='', - dest='heat_container_api_image', - default=DEFAULT_EPHEMERAL_HEAT_API_CONTAINER, - help=_('The container image to use when launching the heat-api ' - 'process. Only used when --heat-type=pod. ' - 'Defaults to: {}'.format( - DEFAULT_EPHEMERAL_HEAT_API_CONTAINER)) - ) - parser.add_argument( - '--heat-container-engine-image', - metavar='', - dest='heat_container_engine_image', - default=DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER, - help=_('The container image to use when launching the heat-engine ' - 'process. Only used when --heat-type=pod. 
' - 'Defaults to: {}'.format( - DEFAULT_EPHEMERAL_HEAT_ENGINE_CONTAINER)) - ) - parser.add_argument( - '--kill', '-k', - dest='kill', - action='store_true', - default=False, - help=_('Kill the running heat process (if found).') - ) - parser.add_argument( - '--heat-dir', - dest='heat_dir', - action='store', - default=os.path.join( - utils.get_default_working_dir('overcloud'), - 'heat-launcher'), - help=_("Directory to use for file storage and logs of the " - "running heat process. Can be " - "set to an already existing directory to reuse the " - "environment from a previous Heat process.") - ) - parser.add_argument( - '--rm-heat', - action='store_true', - default=False, - help=_('If specified and --heat-type is container or pod ' - 'any existing container or pod of a previous ' - 'ephemeral Heat process will be deleted first. ' - 'Ignored if --heat-type is native or --kill.') - ) - parser.add_argument( - '--skip-heat-pull', - action='store_true', - default=False, - help=_('When --heat-type is pod or container, assume ' - 'the container image has already been pulled.') - ) - parser.add_argument( - '--restore-db', - action='store_true', - default=False, - help=_('Restore a database dump if it exists ' - 'within the directory specified by --heat-dir') - ) - heat_type_group = parser.add_mutually_exclusive_group() - heat_type_group.add_argument( - '--heat-native', - dest='heat_native', - action='store_true', - default=False, - help=_('(DEPRECATED): Execute the heat-all process natively on ' - 'this host. ' - 'This option requires that the heat-all binaries ' - 'be installed locally on this machine. ' - 'This option is enabled by default which means heat-all is ' - 'executed on the host OS directly.\n' - 'Conflicts with --heat-type, which deprecates ' - '--heat-native.') - ) - heat_type_group.add_argument( - '--heat-type', - dest='heat_type', - default='pod', - choices=['native', 'container', 'pod'], - help=_('Type of ephemeral Heat process to launch. One of: ' - 'native: Execute heat-all directly on the host. ' - 'container: Execute heat-all in a container. ' - 'pod: Execute separate heat api and engine processes in ' - 'a podman pod.') - ) - return parser - - def take_action(self, parsed_args): - self._configure_logging(parsed_args) - self.log.debug("take_action(%s)" % parsed_args) - - if parsed_args.heat_native: - heat_type = "native" - else: - heat_type = parsed_args.heat_type - - if parsed_args.kill: - rm_heat = True - else: - rm_heat = parsed_args.rm_heat - - self.heat_launcher = utils.get_heat_launcher( - heat_type, parsed_args.heat_api_port, - parsed_args.heat_container_image, - parsed_args.heat_container_api_image, - parsed_args.heat_container_engine_image, - parsed_args.heat_user, - parsed_args.heat_dir, - False, - False, - rm_heat, - parsed_args.skip_heat_pull) - - if parsed_args.kill: - if self._kill_heat(parsed_args) != 0: - msg = _('Heat kill failed.') - self.log.error(msg) - raise exceptions.DeploymentError(msg) - else: - if self._launch_heat(parsed_args) != 0: - msg = _('Heat launch failed.') - self.log.error(msg) - raise exceptions.DeploymentError(msg) - else: - self.log.info("Writing heat clouds.yaml") - utils.write_ephemeral_heat_clouds_yaml(parsed_args.heat_dir) diff --git a/tripleoclient/v1/tripleo_upgrade.py b/tripleoclient/v1/tripleo_upgrade.py deleted file mode 100644 index 38a809ab0..000000000 --- a/tripleoclient/v1/tripleo_upgrade.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2018 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from oslo_config import cfg -from oslo_log import log as logging - -from tripleoclient import constants -from tripleoclient.exceptions import UndercloudUpgradeNotConfirmed -from tripleoclient import utils -from tripleoclient.v1.tripleo_deploy import Deploy - -CONF = cfg.CONF - - -class Upgrade(Deploy): - """Upgrade TripleO""" - - log = logging.getLogger(__name__ + ".Upgrade") - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.log.debug("take_action(%s)" % parsed_args) - if (not parsed_args.yes - and not utils.prompt_user_for_confirmation( - constants.UPGRADE_PROMPT, self.log)): - raise UndercloudUpgradeNotConfirmed(constants.UPGRADE_NO) - - parsed_args.standalone = True - parsed_args.upgrade = True - super(Upgrade, self).take_action(parsed_args) diff --git a/tripleoclient/v1/tripleo_validator.py b/tripleoclient/v1/tripleo_validator.py deleted file mode 100644 index 17b92cb71..000000000 --- a/tripleoclient/v1/tripleo_validator.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
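(Illustrative sketch of the pattern used by the Upgrade command above: it reuses Deploy.take_action and only forces flags on parsed_args; the classes below are stand-ins, not the real ones.)

    import argparse

    class Deploy(object):
        def take_action(self, parsed_args):
            print('deploying, upgrade=%s' % parsed_args.upgrade)

    class Upgrade(Deploy):
        def take_action(self, parsed_args):
            parsed_args.standalone = True  # upgrade always runs in standalone mode
            parsed_args.upgrade = True     # enables the upgrade task phases
            super(Upgrade, self).take_action(parsed_args)

    Upgrade().take_action(argparse.Namespace(standalone=False, upgrade=False))
    # prints: deploying, upgrade=True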
-# -import logging -from tripleoclient import constants - -from validations_libs.cli.community import CommunityValidationInit -from validations_libs.cli.history import GetHistory -from validations_libs.cli.history import ListHistory -from validations_libs.cli.lister import ValidationList -from validations_libs.cli.run import Run -from validations_libs.cli.show import Show -from validations_libs.cli.show import ShowGroup -from validations_libs.cli.show import ShowParameter - - -LOG = logging.getLogger(__name__) - - -class TripleOValidatorList(ValidationList): - """List the available validations""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorList, self).get_parser(parser) - return parser - - -class TripleOValidatorShow(Show): - """Display detailed information about a Validation""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorShow, self).get_parser(parser) - return parser - - -class TripleOValidatorGroupInfo(ShowGroup): - """Display detailed information about a Group""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorGroupInfo, self).get_parser(parser) - return parser - - -class TripleOValidatorShowParameter(ShowParameter): - """Display Validations Parameters""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorShowParameter, self).get_parser(parser) - return parser - - -class TripleOValidatorRun(Run): - """Run the available validations""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorRun, self).get_parser(parser) - default = {'validation_log_dir': constants.VALIDATIONS_LOG_BASEDIR} - parser.set_defaults(**default) - return parser - - -class TripleOValidatorCommunityInit(CommunityValidationInit): - """Create the paths and infrastructure to create a community validation""" - - auth_required = False - - def get_parser(self, parser): - parser = super( - TripleOValidatorCommunityInit, self).get_parser(parser) - return parser - - -class TripleOValidatorShowHistory(ListHistory): - """Display Validations execution history""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorShowHistory, self).get_parser(parser) - default = {'validation_log_dir': constants.VALIDATIONS_LOG_BASEDIR} - parser.set_defaults(**default) - return parser - - -class TripleOValidatorShowRun(GetHistory): - """Display details about a Validation execution""" - - auth_required = False - - def get_parser(self, parser): - parser = super(TripleOValidatorShowRun, self).get_parser(parser) - default = {'validation_log_dir': constants.VALIDATIONS_LOG_BASEDIR} - parser.set_defaults(**default) - return parser diff --git a/tripleoclient/v1/undercloud.py b/tripleoclient/v1/undercloud.py deleted file mode 100644 index 40e562d44..000000000 --- a/tripleoclient/v1/undercloud.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Plugin action implementation""" - -import argparse -import logging -import os -import subprocess - -from openstackclient.i18n import _ - -from oslo_config import cfg - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import utils -from tripleoclient.v1 import undercloud_config - -UNDERCLOUD_FAILURE_MESSAGE = """ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -An error has occured while deploying the Undercloud. - -See the previous output for details about what went wrong. - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -""" - -UNDERCLOUD_COMPLETION_MESSAGE = """ -########################################################## - -The Undercloud has been successfully installed. - -Useful files: - -Password file is at {0} -The stackrc file is at {1} - -Use these files to interact with OpenStack services, and -ensure they are secured. - -########################################################## -""" -UNDERCLOUD_UPGRADE_COMPLETION_MESSAGE = """ -########################################################## - -The Undercloud has been successfully upgraded. - -Useful files: - -Password file is at {0} -The stackrc file is at {1} - -Use these files to interact with OpenStack services, and -ensure they are secured. - -########################################################## -""" - - -class InstallUndercloud(command.Command): - """Install and setup the undercloud""" - - auth_required = False - log = logging.getLogger(__name__ + ".InstallUndercloud") - osloconfig = cfg.CONF - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - parser.add_argument('--force-stack-update', - dest='force_stack_update', - action='store_true', - default=False, - help=_("Do a virtual update of the ephemeral " - "heat stack. New or failed deployments " - "always have the stack_action=CREATE. This " - "option enforces stack_action=UPDATE."), - ) - parser.add_argument( - '--no-validations', - dest='no_validations', - action='store_true', - default=False, - help=_("Do not perform undercloud configuration validations"), - ) - parser.add_argument( - '--inflight-validations', - dest='inflight', - action='store_true', - default=False, - help=_('Activate in-flight validations during the deploy. ' - 'In-flight validations provide a robust way to ensure ' - 'deployed services are running right after their ' - 'activation. Defaults to False.') - ) - parser.add_argument( - '--dry-run', - dest='dry_run', - action='store_true', - default=False, - help=_("Print the install command instead of running it"), - ) - parser.add_argument('-y', '--yes', default=False, - action='store_true', - help=_("Skip yes/no prompt (assume yes).")) - parser.add_argument( - '--disable-container-prepare', - action='store_true', - default=False, - help=_('Disable the container preparation actions to prevent ' - 'container tags from being updated and new containers ' - 'from being fetched. 
If you skip this but do not have ' - 'the container parameters configured, the deployment ' - 'action may fail.') - ) - parser.add_argument( - '--reproduce-command', - action='store_true', - default=False, - help=_('Create a reproducer command with ansible command' - 'line and all environments variables.') - ) - return parser - - def take_action(self, parsed_args): - output_dir = utils.get_output_dir(self.osloconfig.get('output_dir')) - # Fetch configuration used to add logging to a file - utils.load_config(self.osloconfig, constants.UNDERCLOUD_CONF_PATH) - utils.configure_logging(self.log, self.app_args.verbose_level, - self.osloconfig['undercloud_log_file']) - self.log.debug("take_action(%s)" % parsed_args) - - utils.ensure_run_as_normal_user() - no_validations = parsed_args.dry_run or parsed_args.no_validations - inflight = not parsed_args.dry_run and parsed_args.inflight - - cmd = undercloud_config.prepare_undercloud_deploy( - no_validations=no_validations, - verbose_level=self.app_args.verbose_level, - force_stack_update=parsed_args.force_stack_update, - dry_run=parsed_args.dry_run, - inflight=inflight, - reproduce_command=parsed_args.reproduce_command, - disable_container_prepare=parsed_args.disable_container_prepare) - - self.log.warning("Running: %s" % ' '.join(cmd)) - if not parsed_args.dry_run: - try: - subprocess.check_call(cmd) - self.log.warning(UNDERCLOUD_COMPLETION_MESSAGE.format( - os.path.join( - output_dir, - 'tripleo-undercloud-passwords.yaml' - ), - '~/stackrc' - )) - except Exception as e: - self.log.error(UNDERCLOUD_FAILURE_MESSAGE) - self.log.error(e) - raise exceptions.DeploymentError(e) - - -class UpgradeUndercloud(InstallUndercloud): - """Upgrade undercloud""" - - auth_required = False - log = logging.getLogger(__name__ + ".UpgradeUndercloud") - osloconfig = cfg.CONF - - def get_parser(self, prog_name): - parser = super(UpgradeUndercloud, self).get_parser(prog_name) - parser.add_argument('--skip-package-updates', - dest='skip_package_updates', - action='store_true', - default=False, - help=_("Flag to skip the package update when " - "performing upgrades and updates"), - ) - return parser - - def _update_extra_packages(self, packages=[], dry_run=False): - """Necessary packages to be updated before undercloud upgrade.""" - - if not packages: - return - - cmd = ['sudo', 'dnf', 'upgrade', '-y'] + packages - - if not dry_run: - self.log.warning("Updating necessary packages: {}".format( - " ".join(packages))) - output = utils.run_command(cmd, name="Update extra packages") - self.log.warning("{}".format(output)) - else: - self.log.warning("Would update necessary packages: {}".format( - " ".join(cmd))) - - def _run_upgrade(self, parsed_args): - output_dir = utils.get_output_dir(self.osloconfig.get('output_dir')) - cmd = undercloud_config.\ - prepare_undercloud_deploy( - upgrade=True, - yes=parsed_args.yes, - no_validations=parsed_args.no_validations, - verbose_level=self.app_args.verbose_level, - reproduce_command=parsed_args.reproduce_command, - force_stack_update=parsed_args.force_stack_update) - self.log.warning("Running: %s" % ' '.join(cmd)) - try: - subprocess.check_call(cmd) - self.log.warning( - UNDERCLOUD_UPGRADE_COMPLETION_MESSAGE.format( - os.path.join( - output_dir, - 'tripleo-undercloud-passwords.yaml' - ), - '~/stackrc')) - except Exception as e: - self.log.error(UNDERCLOUD_FAILURE_MESSAGE) - self.log.error(e) - raise exceptions.DeploymentError(e) - - def take_action(self, parsed_args): - # Fetch configuration used to add logging to a file - 
utils.load_config(self.osloconfig, constants.UNDERCLOUD_CONF_PATH) - utils.configure_logging(self.log, self.app_args.verbose_level, - self.osloconfig['undercloud_log_file']) - self.log.debug("take action(%s)" % parsed_args) - - if (not parsed_args.yes - and not utils.prompt_user_for_confirmation( - constants.UPGRADE_PROMPT, self.log)): - raise exceptions.UndercloudUpgradeNotConfirmed( - constants.UPGRADE_NO) - - utils.ensure_run_as_normal_user() - - if not parsed_args.skip_package_updates: - self._update_extra_packages(constants.UNDERCLOUD_EXTRA_PACKAGES, - parsed_args.dry_run) - - if not parsed_args.dry_run: - self._run_upgrade(parsed_args) diff --git a/tripleoclient/v1/undercloud_backup.py b/tripleoclient/v1/undercloud_backup.py deleted file mode 100644 index 58b26c861..000000000 --- a/tripleoclient/v1/undercloud_backup.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import logging -import os -import yaml - -from osc_lib.command import command -from osc_lib.i18n import _ - -from tripleoclient import constants -from tripleoclient import utils - -LOG = logging.getLogger(__name__ + ".BackupUndercloud") - -INVENTORY = constants.ANSIBLE_INVENTORY.format('overcloud') - - -class BackupUndercloud(command.Command): - """Backup the undercloud""" - - def get_parser(self, prog_name): - parser = argparse.ArgumentParser( - description=self.get_description(), - prog=prog_name, - add_help=False - ) - - parser.add_argument( - '--init', - const='rear', - nargs='?', - action='store', - help=_("Initialize environment for backup, " - "using 'rear' or 'nfs' as args " - "which will check for package install " - "and configured ReaR or NFS server. " - "Defaults to: rear. " - "i.e. --init rear. " - "WARNING: This flag will be deprecated " - "and replaced by '--setup-rear' and " - "'--setup-nfs'.") - ) - - # New flags for tripleo-ansible backup and restore role. - parser.add_argument( - '--setup-nfs', - default=False, - action='store_true', - help=_("Setup the NFS server on the backup node " - "which will install required packages " - "and configuration on the host 'BackupNode' " - "in the ansible inventory.") - - ) - - parser.add_argument( - '--setup-rear', - default=False, - action='store_true', - help=_("Setup ReaR on the 'Undercloud' host which will " - "install and configure ReaR.") - ) - - parser.add_argument( - '--cron', - default=False, - action='store_true', - help=_("Sets up a new cron job that by default will " - "execute a weekly backup at Sundays midnight, " - "but that can be customized by using the " - "tripleo_backup_and_restore_cron extra-var.") - ) - - parser.add_argument( - '--db-only', - default=False, - action='store_true', - help=_("Perform a DB backup of the 'Undercloud' host. 
" - "The DB backup file will be stored in /home/stack " - "with the name openstack-backup-mysql-.sql.") - ) - - parser.add_argument( - '--inventory', - action='store', - default=INVENTORY, - help=_("Tripleo inventory file generated with " - "tripleo-ansible-inventory command. " - "Defaults to: " + INVENTORY) - ) - - # Parameter to choose the files to backup - parser.add_argument( - '--add-path', - action='append', - default=['/home/stack/'], - help=_("Add additional files to backup. " - "Defaults to: /home/stack/ " - "i.e. --add-path /this/is/a/folder/ " - " --add-path /this/is/a/texfile.txt.") - ) - - parser.add_argument( - "--exclude-path", - default=[], - action="append", - help=_("Exclude path when performing the Undercloud Backup, " - "this option can be specified multiple times. " - "Defaults to: none " - "i.e. --exclude-path /this/is/a/folder/ " - " --exclude-path /this/is/a/texfile.txt.") - ) - - parser.add_argument( - '--save-swift', - default=False, - action='store_true', - help=_("Save backup to swift. " - "Defaults to: False " - "Special attention should be taken that " - "Swift itself is backed up if you call this multiple times " - "the backup size will grow exponentially.") - ) - - parser.add_argument( - '--extra-vars', - default=None, - action='store', - help=_("Set additional variables as Dict or as " - "an absolute path of a JSON or YAML file type. " - "i.e. --extra-vars '{\"key\": \"val\", " - "\"key2\": \"val2\"}' " - "i.e. --extra-vars /path/to/my_vars.yaml " - "i.e. --extra-vars /path/to/my_vars.json. " - "For more information about the variables that " - "can be passed, visit: https://opendev.org/openstack/" - "tripleo-ansible/src/branch/master/tripleo_ansible/" - "roles/backup_and_restore/defaults/main.yml.") - ) - - return parser - - def _parse_extra_vars(self, raw_extra_vars): - - if raw_extra_vars is None: - extra_vars = None - elif os.path.exists(raw_extra_vars): - with open(raw_extra_vars, 'r') as fp: - extra_vars = yaml.safe_load(fp.read()) - else: - try: - extra_vars = yaml.safe_load(raw_extra_vars) - except yaml.YAMLError as exc: - raise RuntimeError( - _('--extra-vars is not an existing file and cannot be ' - 'parsed as YAML / JSON: %s') % exc) - - return extra_vars - - def _run_backup_undercloud(self, parsed_args): - - extra_vars = self._parse_extra_vars(parsed_args.extra_vars) - - LOG.warning( - '\n' - ' #############################################################\n' - ' # Deprecation note #\n' - ' # Backup and restore feature is deprecated and will be #\n' - ' # removed in the next release. 
#\n' - ' #############################################################\n') - - if not (os.path.isfile(parsed_args.inventory) and - os.access(parsed_args.inventory, os.R_OK)): - raise RuntimeError( - _('The inventory file {} does not exist or is not ' - 'readable'.format(parsed_args.inventory))) - - if parsed_args.db_only is True: - - self._run_ansible_playbook( - playbook='cli-undercloud-db-backup.yaml', - inventory=parsed_args.inventory, - tags=None, - skip_tags=None, - extra_vars=extra_vars - ) - else: - - if parsed_args.setup_nfs is True or parsed_args.init == 'nfs': - - self._run_ansible_playbook( - playbook='prepare-nfs-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_nfs_server', - skip_tags=None, - extra_vars=extra_vars - ) - if parsed_args.setup_rear is True or parsed_args.init == 'rear': - - self._run_ansible_playbook( - playbook='prepare-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_setup_rear', - skip_tags=None, - extra_vars=extra_vars - ) - - if parsed_args.cron is True: - - self._run_ansible_playbook( - playbook='cli-undercloud-backup-cron.yaml', - inventory=parsed_args.inventory, - tags=None, - skip_tags=None, - extra_vars=extra_vars - ) - - if (parsed_args.setup_nfs is False and - parsed_args.setup_rear is False and - parsed_args.cron is False and - parsed_args.init is None): - - self._run_ansible_playbook( - playbook='cli-undercloud-backup.yaml', - inventory=parsed_args.inventory, - tags='bar_create_recover_image', - skip_tags=None, - extra_vars=extra_vars - ) - - def _legacy_backup_undercloud(self, parsed_args): - """Legacy backup undercloud. - - This will allow for easier removal once the functionality - is no longer needed. - """ - - merge_paths = sorted(list(set(parsed_args.add_path))) - for exc in parsed_args.exclude_path: - if exc in merge_paths: - merge_paths.remove(exc) - - files_to_backup = ','.join(merge_paths) - - # Define the backup sources_path (files to backup). - # This is a comma separated string. - # I.e. "/this/is/a/folder/,/this/is/a/texfile.txt" - extra_vars = {"sources_path": files_to_backup} - if parsed_args.save_swift: - extra_vars.update({"save_swift": True}) - - LOG.debug(_('Launch the Undercloud Backup')) - self._run_ansible_playbook( - playbook='cli-undercloud-backup-legacy.yaml', - inventory='localhost, ', - tags=None, - skip_tags=None, - extra_vars=extra_vars - ) - - def _run_ansible_playbook(self, - playbook, - inventory, - tags, - skip_tags, - extra_vars): - """Run ansible playbook""" - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook=playbook, - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - tags=tags, - skip_tags=skip_tags, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars - ) - - def take_action(self, parsed_args): - - if len(parsed_args.add_path) > 1 or parsed_args.save_swift: - - LOG.warning("The following flags will be deprecated:" - "[--add-path, --exclude-path, --init, --save-swift]") - - self._legacy_backup_undercloud(parsed_args) - - else: - self._run_backup_undercloud(parsed_args) - - print( - '\n' - ' #############################################################\n' - ' # Disclaimer #\n' - ' # Backup verification is the End Users responsibility #\n' - ' # Please verify backup integrity before any possible #\n' - ' # disruptive actions against the Undercloud. The resulting #\n' - ' # backup file path will be shown on a successful execution. #\n' - ' # #\n' - ' # .-Stay safe and avoid future issues-. 
#\n' - ' #############################################################\n' - ) diff --git a/tripleoclient/v1/undercloud_config.py b/tripleoclient/v1/undercloud_config.py deleted file mode 100644 index a99cd1134..000000000 --- a/tripleoclient/v1/undercloud_config.py +++ /dev/null @@ -1,874 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Plugin action implementation""" - -import yaml -import logging -import netaddr -import os -import shutil -import sys - -from cryptography import x509 - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization - -from jinja2 import Environment -from jinja2 import FileSystemLoader -from jinja2 import meta - -from osc_lib.i18n import _ -from oslo_config import cfg -from tripleo_common.image import kolla_builder - -from tripleoclient.config.undercloud import load_global_config -from tripleoclient.config.undercloud import UndercloudConfig -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import utils -from tripleoclient.v1 import undercloud_preflight - - -# Provides mappings for some of the instack_env tags to undercloud heat -# params or undercloud.conf opts known here (as a fallback), needed to maintain -# feature parity with instack net config override templates. -# TODO(bogdando): all of the needed mappings should be wired-in, eventually -INSTACK_NETCONF_MAPPING = { - 'LOCAL_INTERFACE': 'local_interface', - 'LOCAL_IP': 'local_ip', - 'LOCAL_MTU': 'UndercloudLocalMtu', - 'PUBLIC_INTERFACE_IP': 'undercloud_public_host', # can't be 'CloudName' - 'UNDERCLOUD_NAMESERVERS': 'undercloud_nameservers', - 'SUBNETS_STATIC_ROUTES': 'ControlPlaneStaticRoutes', -} - -MULTI_PARAMETER_MAPPING = {} - -PARAMETER_MAPPING = { - 'inspection_interface': 'IronicInspectorInterface', - 'ipxe_enabled': 'IronicInspectorIPXEEnabled', - 'undercloud_debug': 'Debug', - 'certificate_generation_ca': 'CertmongerCA', - 'undercloud_public_host': 'CloudName', - 'local_mtu': 'UndercloudLocalMtu', - 'clean_nodes': 'IronicAutomatedClean', - 'container_healthcheck_disabled': 'ContainerHealthcheckDisabled', - 'local_subnet': 'UndercloudCtlplaneLocalSubnet', - 'enable_routed_networks': 'UndercloudEnableRoutedNetworks', - 'local_interface': 'NeutronPublicInterface', - 'auth_token_lifetime': 'TokenExpiration', -} - -SUBNET_PARAMETER_MAPPING = { - 'cidr': 'NetworkCidr', - 'gateway': 'NetworkGateway', - 'host_routes': 'HostRoutes', -} - -THT_HOME = os.environ.get('THT_HOME', - "/usr/share/openstack-tripleo-heat-templates/") - -USER_HOME = os.environ.get('HOME', '') - -CONF = cfg.CONF - -# When adding new options to the lists below, make sure to regenerate the -# sample config by running "tox -e genconfig" in the project root. 
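(Illustrative sketch of how the flat PARAMETER_MAPPING above is meant to be applied; the conf dict here is a stand-in for the parsed undercloud.conf options.)

    conf = {'local_mtu': 1500, 'undercloud_debug': True}
    env_data = {}
    # Copy each known undercloud.conf option to its heat parameter name.
    for opt, heat_param in PARAMETER_MAPPING.items():
        if opt in conf:
            env_data[heat_param] = conf[opt]
    # env_data == {'UndercloudLocalMtu': 1500, 'Debug': True}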
-ci_defaults = kolla_builder.container_images_prepare_defaults() - -config = UndercloudConfig() - -# Routed subnets -_opts = config.get_opts() -load_global_config() - - -def _load_subnets_config_groups(): - for group in CONF.subnets: - g = cfg.OptGroup(name=group, title=group) - if group == CONF.local_subnet: - CONF.register_opts(config.get_local_subnet_opts(), group=g) - else: - CONF.register_opts(config.get_remote_subnet_opts(), group=g) - - -LOG = logging.getLogger(__name__ + ".undercloud_config") - - -def _get_jinja_env_source(f): - path, filename = os.path.split(f) - env = Environment(loader=FileSystemLoader(path)) - src = env.loader.get_source(env, filename)[0] - return (env, src) - - -def _get_unknown_instack_tags(env, src): - found_tags = set(meta.find_undeclared_variables(env.parse(src))) - known_tags = set(INSTACK_NETCONF_MAPPING.keys()) - if not found_tags <= known_tags: - return (', ').join(found_tags - known_tags) - return None - - -def _process_drivers_and_hardware_types(conf, env): - """Populate the environment with ironic driver information.""" - # Ensure correct rendering of the list and uniqueness of the items - enabled_hardware_types = list(conf.enabled_hardware_types) - if conf.enable_node_discovery: - if conf.discovery_default_driver not in enabled_hardware_types: - enabled_hardware_types.append(conf.discovery_default_driver) - env['IronicInspectorEnableNodeDiscovery'] = True - env['IronicInspectorDiscoveryDefaultDriver'] = ( - conf.discovery_default_driver) - - env['IronicEnabledNetworkInterfaces'] = \ - conf.ironic_enabled_network_interfaces - env['IronicDefaultNetworkInterface'] = \ - conf.ironic_default_network_interface - - # In most cases power and management interfaces are called the same, so we - # use one variable for them. - mgmt_interfaces = ['ipmitool'] - # TODO(dtantsur): can we somehow avoid hardcoding hardware types here? - for hw_type in ('redfish', 'idrac', 'ilo', 'irmc', 'staging-ovirt', - 'xclarity'): - if hw_type in enabled_hardware_types: - mgmt_interfaces.append(hw_type) - mgmt_interfaces.append('fake') - - bios_interfaces = ['no-bios'] - for hw_type in ['ilo', 'irmc', 'redfish']: - if hw_type in enabled_hardware_types: - bios_interfaces.append(hw_type) - - # Two hardware types use non-default boot interfaces. - boot_interfaces = ['ipxe', 'pxe'] - for hw_type in ('ilo', 'irmc'): - if hw_type in enabled_hardware_types: - boot_interfaces.append('%s-pxe' % hw_type) - - inspect_interfaces = ['inspector', 'no-inspect'] - for hw_type in ('redfish', 'idrac', 'ilo', 'irmc'): - if hw_type in enabled_hardware_types: - inspect_interfaces.append(hw_type) - - raid_interfaces = ['no-raid'] - if 'idrac' in enabled_hardware_types: - raid_interfaces.append('idrac') - - vendor_interfaces = ['no-vendor'] - for (hw_type, iface) in [('ipmi', 'ipmitool'), - ('idrac', 'idrac')]: - if hw_type in enabled_hardware_types: - vendor_interfaces.append(iface) - - power_interfaces = mgmt_interfaces.copy() - # The snmp hardware type uses noop management and snmp power; noop - # management is also used by ipmi and staging hardware types. 
- mgmt_interfaces.append('noop') - if 'snmp' in enabled_hardware_types: - power_interfaces.append('snmp') - - deploy_interfaces = ['direct', 'ansible', 'ramdisk'] - if 'fake-hardware' in enabled_hardware_types: - deploy_interfaces.append('fake') - boot_interfaces.append('fake') - - env['IronicEnabledHardwareTypes'] = enabled_hardware_types - - env['IronicEnabledBiosInterfaces'] = bios_interfaces - env['IronicEnabledBootInterfaces'] = boot_interfaces - env['IronicEnabledInspectInterfaces'] = inspect_interfaces - env['IronicEnabledManagementInterfaces'] = mgmt_interfaces - env['IronicEnabledPowerInterfaces'] = power_interfaces - env['IronicEnabledRaidInterfaces'] = raid_interfaces - env['IronicEnabledVendorInterfaces'] = vendor_interfaces - env['IronicEnabledDeployInterfaces'] = deploy_interfaces - - -def _process_ipa_args(conf, env): - """Populate the environment with IPA kernal args .""" - inspection_kernel_args = ['console=tty0', 'console=ttyS0,115200'] - if conf.undercloud_debug: - inspection_kernel_args.append('ipa-debug=1') - if conf.inspection_runbench: - inspection_kernel_args.append('ipa-inspection-benchmarks=cpu,mem,disk') - if conf.inspection_extras: - inspection_kernel_args.append('ipa-inspection-dhcp-all-interfaces=1') - inspection_kernel_args.append('ipa-collect-lldp=1') - env['IronicInspectorCollectors'] = ('default,extra-hardware,' - 'numa-topology,logs') - else: - env['IronicInspectorCollectors'] = 'default,logs' - env['IronicInspectorKernelArgs'] = ' '.join(inspection_kernel_args) - - -def _generate_inspection_subnets(): - env_list = [] - for subnet in CONF.subnets: - env_dict = {} - s = CONF.get(subnet) - env_dict['tag'] = subnet - try: - if netaddr.IPNetwork(s.cidr).version == 4: - env_dict['ip_range'] = s.inspection_iprange - if netaddr.IPNetwork(s.cidr).version == 6: - if CONF['ipv6_address_mode'] == 'dhcpv6-stateful': - env_dict['ip_range'] = s.inspection_iprange - if CONF['ipv6_address_mode'] == 'dhcpv6-stateless': - # dnsmasq(8): A static-only subnet with address all zeros - # may be used as a "catch-all" address to enable replies to - # all Information-request packets on a subnet which is - # provided with stateless DHCPv6, ie --dhcp-range=::,static - env_dict['ip_range'] = ','.join( - [str(netaddr.IPNetwork(s.cidr).ip), 'static']) - env_dict['netmask'] = str(netaddr.IPNetwork(s.cidr).netmask) - env_dict['gateway'] = s.gateway - env_dict['host_routes'] = s.host_routes - env_dict['mtu'] = CONF.local_mtu - env_list.append(env_dict) - except Exception as e: - msg = _('Invalid configuration data in subnet "{}". Double check ' - 'the settings for this subnet. 
Error: {}').format(subnet, - e) - LOG.error(msg) - raise exceptions.DeploymentError(msg) - return env_list - - -def _generate_subnets_static_routes(): - env_list = [] - local_router = CONF.get(CONF.local_subnet).gateway - for subnet in CONF.subnets: - if subnet == str(CONF.local_subnet): - continue - s = CONF.get(subnet) - env_list.append({'destination': s.cidr, 'nexthop': local_router}) - for route in CONF.get(CONF.local_subnet).host_routes: - env_list.append({'destination': route['destination'], - 'nexthop': route['nexthop']}) - - return env_list - - -def _generate_masquerade_networks(): - """Create input for OS::TripleO::Services::MasqueradeNetworks - - The service use parameter MasqueradeNetworks with the following - formating: - {'source_cidr_A': ['destination_cidr_A', 'destination_cidr_B'], - 'source_cidr_B': ['destination_cidr_A', 'destination_cidr_B']} - """ - network_cidrs = [] - for subnet in CONF.subnets: - s = CONF.get(subnet) - network_cidrs.append(s.cidr) - - masqurade_networks = {} - for subnet in CONF.subnets: - s = CONF.get(subnet) - if s.masquerade: - masqurade_networks.update({s.cidr: network_cidrs}) - - return masqurade_networks - - -def _calculate_allocation_pools(subnet): - """Calculate subnet allocation pools - - Remove the gateway address, the inspection IP range and the undercloud IP's - from the subnets full IP range and return all remaining address ranges as - allocation pools. If dhcp_start and/or dhcp_end is defined, also remove - addresses before dhcp_start and addresses after dhcp_end. - """ - ip_network = netaddr.IPNetwork(subnet.cidr) - # NOTE(hjensas): Ignore the default dhcp_start and dhcp_end if cidr is not - # the default as well. I.e allow not specifying dhcp_start and dhcp_end. - if (subnet.cidr != constants.CTLPLANE_CIDR_DEFAULT - and subnet.dhcp_start == constants.CTLPLANE_DHCP_START_DEFAULT - and subnet.dhcp_end == constants.CTLPLANE_DHCP_END_DEFAULT): - subnet.dhcp_start, subnet.dhcp_end = None, None - if subnet.dhcp_start and subnet.dhcp_end: - ip_set = netaddr.IPSet() - for a, b in zip(subnet.dhcp_start, subnet.dhcp_end): - ip_set.add(netaddr.IPRange(netaddr.IPAddress(a), - netaddr.IPAddress(b))) - else: - ip_set = netaddr.IPSet(ip_network) - # Remove addresses before dhcp_start if defined - if subnet.dhcp_start: - a = netaddr.IPAddress(ip_network.first) - b = netaddr.IPAddress(subnet.dhcp_start[0]) - 1 - ip_set.remove(netaddr.IPRange(a, b)) - # Remove addresses after dhcp_end if defined - if subnet.dhcp_end: - a = netaddr.IPAddress(subnet.dhcp_end[0]) + 1 - b = netaddr.IPAddress(ip_network.last) - ip_set.remove(netaddr.IPRange(a, b)) - # Remove network address and broadcast address - ip_set.remove(ip_network.first) - ip_set.remove(ip_network.last) - # Remove gateway, local_ip, admin_host and public_host addresses - ip_set.remove(netaddr.IPAddress(subnet.get('gateway'))) - ip_set.remove(netaddr.IPNetwork(CONF.local_ip).ip) - ip_set.remove(netaddr.IPNetwork(utils.get_single_ip( - CONF.undercloud_admin_host, ip_version=ip_network.version))) - ip_set.remove(netaddr.IPNetwork(utils.get_single_ip( - CONF.undercloud_public_host, ip_version=ip_network.version))) - # Remove dns nameservers - for addr in subnet.get('dns_nameservers', []): - ip_set.remove(netaddr.IPAddress(addr)) - # Remove addresses in the inspection_iprange - inspect_start, inspect_end = subnet.get('inspection_iprange').split(',') - ip_set.remove(netaddr.IPRange(inspect_start, inspect_end)) - # Remove dhcp_exclude addresses and ip ranges - for exclude in subnet.dhcp_exclude: - if '-' in 
exclude: - exclude_start, exclude_end = exclude.split('-') - ip_set.remove(netaddr.IPRange(exclude_start, exclude_end)) - else: - ip_set.remove(netaddr.IPAddress(exclude)) - - return [{'start': netaddr.IPAddress(ip_range.first).format(), - 'end': netaddr.IPAddress(ip_range.last).format()} - for ip_range in list(ip_set.iter_ipranges())] - - -def _generate_inspection_physnet_cidr_map(): - cidr_map = {} - for subnet in CONF.subnets: - s = CONF.get(subnet) - if subnet == str(CONF.local_subnet): - cidr_map[s.cidr] = 'ctlplane' - else: - cidr_map[s.cidr] = subnet - - return cidr_map - - -def _process_network_args(env): - """Populate the environment with network configuration.""" - - env['IronicInspectorSubnets'] = _generate_inspection_subnets() - env['PortPhysnetCidrMap'] = _generate_inspection_physnet_cidr_map() - env['ControlPlaneStaticRoutes'] = _generate_subnets_static_routes() - env['UndercloudCtlplaneSubnets'] = {} - env['UndercloudCtlplaneIPv6AddressMode'] = CONF['ipv6_address_mode'] - for subnet in CONF.subnets: - s = CONF.get(subnet) - env['UndercloudCtlplaneSubnets'][subnet] = { - 'AllocationPools': _calculate_allocation_pools(s) - } - if s.get('dns_nameservers'): - env['UndercloudCtlplaneSubnets'][subnet].update( - {'DnsNameServers': s['dns_nameservers']}) - else: - env['UndercloudCtlplaneSubnets'][subnet].update( - {'DnsNameServers': CONF['undercloud_nameservers']}) - for param_key, param_value in SUBNET_PARAMETER_MAPPING.items(): - if param_value: - env['UndercloudCtlplaneSubnets'][subnet].update( - {param_value: s[param_key]}) - env['MasqueradeNetworks'] = _generate_masquerade_networks() - if len(CONF['undercloud_nameservers']) > 5: - raise exceptions.InvalidConfiguration('Too many nameservers provided. ' - 'Please provide less than 6 ' - 'servers in undercloud_' - 'nameservers.') - - # We do not use undercloud ips for env, but just validate the configured - # value here. 
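(A small self-contained sketch of the netaddr.IPSet technique used by _calculate_allocation_pools above: start from the full subnet, remove reserved addresses, and read back the remaining contiguous ranges. The example subnet and reserved addresses are illustrative.)

    import netaddr

    net = netaddr.IPNetwork('192.0.2.0/24')
    ip_set = netaddr.IPSet(net)
    ip_set.remove(net.first)                                       # network address
    ip_set.remove(net.last)                                        # broadcast address
    ip_set.remove(netaddr.IPAddress('192.0.2.1'))                  # gateway
    ip_set.remove(netaddr.IPRange('192.0.2.100', '192.0.2.120'))   # inspection range
    pools = [{'start': str(netaddr.IPAddress(r.first)),
              'end': str(netaddr.IPAddress(r.last))}
             for r in ip_set.iter_ipranges()]
    # pools == [{'start': '192.0.2.2', 'end': '192.0.2.99'},
    #           {'start': '192.0.2.121', 'end': '192.0.2.254'}]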
- if (CONF.get('generate_service_certificate') or - CONF.get('undercloud_service_certificate')): - if CONF.local_ip.split('/')[0] == CONF.undercloud_admin_host: - msg = ("Different IPs should be assigned to local_ip and " - "undercloud_admin_host") - raise exceptions.InvalidConfiguration(msg) - - -def _env_set_undercloud_ctlplane_networks_attribues(env): - env['CtlplaneNetworkAttributes'] = dict(network=dict(), subnets=dict()) - env['CtlplaneNetworkAttributes']['network']['mtu'] = CONF.local_mtu - env['CtlplaneNetworkAttributes']['subnets']['ctlplane-subnet'] = { - 'cidr': CONF.get(CONF.local_subnet).cidr, - 'gateway_ip': CONF.get(CONF.local_subnet).gateway, - 'dns_nameservers': CONF.undercloud_nameservers, - 'host_routes': _generate_subnets_static_routes(), - 'tags': [], - } - - -def _process_chrony_acls(env): - """Populate ACL rules for chrony to allow ctlplane subnets""" - acl_rules = [] - for subnet in CONF.subnets: - s = CONF.get(subnet) - acl_rules.append('allow ' + s.get('cidr')) - env['ChronyAclRules'] = acl_rules - - -def prepare_undercloud_deploy(upgrade=False, no_validations=True, - verbose_level=1, yes=False, - force_stack_update=False, dry_run=False, - inflight=False, - reproduce_command=False, - disable_container_prepare=False): - """Prepare Undercloud deploy command based on undercloud.conf""" - - if CONF.get('undercloud_hostname'): - utils.set_hostname(CONF.get('undercloud_hostname')) - - env_data = {} - registry_overwrites = {} - deploy_args = [] - net_config_yaml = None - # Fetch configuration and use its log file param to add logging to a file - utils.load_config(CONF, constants.UNDERCLOUD_CONF_PATH) - utils.configure_logging(LOG, verbose_level, CONF['undercloud_log_file']) - _load_subnets_config_groups() - - output_dir = utils.get_output_dir(CONF['output_dir']) - - # NOTE(bogdando): the generated env files are stored another path then - # picked up later. - # NOTE(aschultz): We copy this into the tht root that we save because - # we move any user provided environment files into this root later. - tempdir = os.path.join(os.path.abspath(output_dir), - 'tripleo-config-generated-env-files') - utils.makedirs(tempdir) - - # Set the undercloud home dir parameter so that stackrc is produced in - # the users home directory. 
- env_data['UndercloudHomeDir'] = USER_HOME - - env_data['PythonInterpreter'] = sys.executable - - env_data['ContainerImagePrepareDebug'] = CONF['undercloud_debug'] - - for param_key, param_value in PARAMETER_MAPPING.items(): - if param_key in CONF.keys(): - env_data[param_value] = CONF[param_key] - - # Some undercloud config options need to tweak multiple template parameters - for undercloud_key in MULTI_PARAMETER_MAPPING: - for env_value in MULTI_PARAMETER_MAPPING[undercloud_key]: - if undercloud_key in CONF.keys(): - env_data[env_value] = CONF[undercloud_key] - - # Set up parameters for undercloud networking - _process_network_args(env_data) - _env_set_undercloud_ctlplane_networks_attribues(env_data) - - # Setup parameter for Chrony ACL rules - _process_chrony_acls(env_data) - - # Parse the undercloud.conf options to include necessary args and - # yaml files for undercloud deploy command - - if CONF.get('undercloud_enable_selinux'): - env_data['SELinuxMode'] = 'enforcing' - else: - env_data['SELinuxMode'] = 'permissive' - - if CONF.get('undercloud_ntp_servers'): - env_data['NtpServer'] = CONF['undercloud_ntp_servers'] - - env_data['TimeZone'] = (CONF.get('undercloud_timezone') or - utils.get_local_timezone()) - - if CONF.get('enable_validations'): - env_data['UndercloudConfigFilePath'] = constants.UNDERCLOUD_CONF_PATH - if not no_validations: - env_data['EnableValidations'] = CONF['enable_validations'] - - if CONF.get('overcloud_domain_name'): - env_data['NeutronDnsDomain'] = CONF['overcloud_domain_name'] - deploy_args.append('--local-domain=%s' % CONF['overcloud_domain_name']) - - local_registry_name = '.'.join([utils.get_short_hostname(), - 'ctlplane', - CONF['overcloud_domain_name']]) - if CONF.get('container_cli', 'podman') == 'podman': - env_data['DockerInsecureRegistryAddress'] = [local_registry_name] - env_data['DockerInsecureRegistryAddress'].append( - CONF['local_ip'].split('/')[0]) - env_data['DockerInsecureRegistryAddress'].append( - CONF['undercloud_admin_host']) - else: - msg = ('Unsupported container_cli: %s' % CONF['container_cli']) - raise exceptions.InvalidConfiguration(msg) - - env_data['DockerInsecureRegistryAddress'].extend( - CONF['container_insecure_registries']) - - env_data['ContainerCli'] = CONF['container_cli'] - - if CONF.get('container_registry_mirror'): - env_data['DockerRegistryMirror'] = CONF['container_registry_mirror'] - - # This parameter the IP address used to bind the local container registry - env_data['LocalContainerRegistry'] = local_registry_name - - if CONF['additional_architectures']: - # In queens (instack-undercloud) we used this to setup additional - # architectures. For rocky+ we want to pass a list and be smarter in - # THT. We can remove this in 'T' when we get there. 
- for arch in CONF['additional_architectures']: - env_data['EnableArchitecture%s' % arch.upper()] = True - env_data['AdditionalArchitectures'] = \ - ','.join(CONF['additional_architectures']) - - if CONF.get('local_ip'): - deploy_args.append('--local-ip=%s' % CONF['local_ip']) - - tht_templates = CONF.get('templates') or THT_HOME - deploy_args.append('--templates=%s' % tht_templates) - - if CONF.get('roles_file'): - deploy_args.append('--roles-file=%s' % CONF['roles_file']) - else: - deploy_args.append('--roles-file=%s' % os.path.join( - tht_templates, constants.UNDERCLOUD_ROLES_FILE)) - - networks_file = (CONF.get('networks_file') or - os.path.join(tht_templates, - constants.UNDERCLOUD_NETWORKS_FILE)) - deploy_args.append('--networks-file=%s' % networks_file) - - if yes: - deploy_args += ['-y'] - - if upgrade: - deploy_args += [ - '--upgrade', - '-e', os.path.join( - tht_templates, - "environments/lifecycle/undercloud-upgrade-prepare.yaml")] - - if not CONF.get('heat_native', False): - deploy_args.append('--heat-native=False') - else: - deploy_args.append('--heat-native') - - if CONF.get('heat_container_image'): - deploy_args.append('--heat-container-image=%s' - % CONF['heat_container_image']) - - # These should be loaded first so we can override all the bits later - deploy_args += [ - "-e", os.path.join(tht_templates, "environments/undercloud.yaml"), - '-e', os.path.join(tht_templates, 'environments/use-dns-for-vips.yaml') - ] - - # If a container images file is used, copy it into the tempdir so it can - # later be merged with other deployment artifacts and user-provided files. - _container_images_config(CONF, deploy_args, env_data, tempdir) - - if env_data['MasqueradeNetworks']: - deploy_args += ['-e', os.path.join( - tht_templates, "environments/services/masquerade-networks.yaml")] - - if CONF.get('enable_keystone'): - LOG.warning('Keystone has been deprecated and will no longer be ' - 'installed using the enable_keystone option. Please ' - 'remove it from undercloud.conf.') - - if CONF.get('enable_ironic'): - deploy_args += ['-e', os.path.join( - tht_templates, "environments/services/ironic.yaml")] - - # ironic-inspector can only work if ironic is enabled - if CONF.get('enable_ironic_inspector'): - deploy_args += ['-e', os.path.join( - tht_templates, - "environments/services/ironic-inspector.yaml")] - - _process_drivers_and_hardware_types(CONF, env_data) - _process_ipa_args(CONF, env_data) - - if CONF.get('enable_nova'): - LOG.warning('Nova has been deprecated and will no longer be ' - 'installed using the enable_nova option. Please remove ' - 'it from undercloud.conf.') - - if CONF.get('enable_novajoin'): - LOG.warning('Novajoin has been deprecated and will no longer be ' - 'installed using the enable_novajoin option. Please ' - 'remove it from undercloud.conf. TLS Everywhere is now ' - 'implemented using an ansible module instead.') - - if not CONF.get('enable_neutron'): - deploy_args += ['-e', os.path.join( - tht_templates, "environments/disable-neutron.yaml")] - - if CONF.get('enable_heat'): - LOG.warning('enable_heat has been deprecated. Ephemeral heat ' - 'is now deployed by default. 
Please remove the ' - 'option from undercloud.conf.') - - if CONF.get('ipa_otp'): - deploy_args += ['-e', os.path.join( - tht_templates, "environments/services/undercloud-tls.yaml")] - env_data['UndercloudIpaOtp'] = CONF['ipa_otp'] - - if CONF.get('enable_swift'): - LOG.warning('Swift has been deprecated and will no longer be ' - 'installed; please remove the enable_swift option') - - if CONF.get('enable_telemetry'): - LOG.warning('Telemetry has been deprecated and will no longer be ' - 'installed; please remove the enable_telemetry option') - - if CONF.get('enable_cinder'): - LOG.warning('Cinder has been deprecated and will no longer be ' - 'installed; please remove the enable_cinder option') - - if CONF.get('enable_swift_encryption'): - LOG.warning('Swift encryption has been deprecated and will no longer ' - 'be installed; please remove the enable_swift_encryption ' - 'option') - - if CONF.get('enable_frr'): - deploy_args += ['-e', os.path.join( - tht_templates, - "environments/services/frr.yaml")] - - if CONF.get('undercloud_service_certificate'): - # We assume that the certificate is trusted - env_data['InternalTLSCAFile'] = '' - env_data.update( - _get_public_tls_parameters( - CONF.get('undercloud_service_certificate'))) - elif CONF.get('generate_service_certificate'): - deploy_args += ['-e', os.path.join( - tht_templates, - "environments/public-tls-undercloud.yaml")] - if CONF.get('certificate_generation_ca') == 'IPA': - env_data['InternalTLSCAFile'] = '/etc/ipa/ca.crt' - env_data['PublicTLSCAFile'] = '/etc/ipa/ca.crt' - else: - deploy_args += ['-e', os.path.join( - tht_templates, - "environments/ssl/no-tls-endpoints-public-ip.yaml")] - - if (CONF.get('generate_service_certificate') or - CONF.get('undercloud_service_certificate')): - local_net = netaddr.IPNetwork(CONF.get('local_ip')) - - endpoint_environment = _get_tls_endpoint_environment( - CONF.get('undercloud_public_host'), tht_templates) - - public_host = utils.get_single_ip(CONF.get('undercloud_public_host'), - ip_version=local_net.version) - public_ip = netaddr.IPAddress(public_host) - deploy_args += ['--public-virtual-ip', public_host] - - # To make sure the resolved host is set to the right IP in /etc/hosts - if not utils.is_valid_ip(CONF.get('undercloud_public_host')): - extra_host = public_host + ' ' + CONF.get('undercloud_public_host') - env_data['ExtraHostFileEntries'] = extra_host - - admin_host = utils.get_single_ip(CONF.get('undercloud_admin_host'), - ip_version=local_net.version) - admin_ip = netaddr.IPAddress(admin_host) - deploy_args += ['--control-virtual-ip', admin_host] - - if not CONF.get('net_config_override'): - if (admin_ip not in local_net.cidr or - public_ip not in local_net.cidr): - LOG.warning('undercloud_admin_host or undercloud_public_host ' - 'is not in the same cidr as local_ip.') - - deploy_args += [ - '-e', endpoint_environment, - '-e', os.path.join( - tht_templates, - 'environments/services/undercloud-haproxy.yaml')] - - u = CONF.get('deployment_user') or utils.get_deployment_user() - env_data['DeploymentUser'] = u - # TODO(cjeanner) drop that once using oslo.privsep - deploy_args += ['--deployment-user', u] - - deploy_args += ['--output-dir=%s' % output_dir] - utils.makedirs(output_dir) - - if CONF.get('cleanup'): - deploy_args.append('--cleanup') - - if CONF.get('net_config_override'): - data_file = CONF['net_config_override'] - if os.path.abspath(data_file) != data_file: - data_file = os.path.join(USER_HOME, data_file) - - if not os.path.exists(data_file): - msg = _("Could not find net_config_override file '%s'") % data_file - LOG.error(msg) - raise RuntimeError(msg) - - # NOTE(bogdando): 
Process templated net config override data: - # * get a list of used instack_env j2 tags (j2 vars, like {{foo}}), - # * fetch values for the tags from the known mappings, - # * raise if there are unmatched tags left - # * render the template into a JSON dict - net_config_env, template_source = _get_jinja_env_source(data_file) - unknown_tags = _get_unknown_instack_tags(net_config_env, - template_source) - if unknown_tags: - msg = (_('Cannot render net_config_override file {0}; it contains ' - 'unknown instack_env j2 tags: {1}').format( - data_file, unknown_tags)) - LOG.error(msg) - raise exceptions.DeploymentError(msg) - - # Create the rendering context from the known-to-be-present mappings of - # identified instack_env tags to the undercloud heat params generated in - # env_data. Fall back to config opts when env_data misses a param. - context = {} - for tag in INSTACK_NETCONF_MAPPING.keys(): - mapped_value = INSTACK_NETCONF_MAPPING[tag] - if mapped_value in env_data.keys() or mapped_value in CONF.keys(): - try: - context[tag] = CONF[mapped_value] - except cfg.NoSuchOptError: - context[tag] = env_data.get(mapped_value) - - # this returns a unicode string, convert it into JSON - net_config_str = net_config_env.get_template( - os.path.split(data_file)[-1]).render(context).replace( - "'", '"').replace('&quot;', '"') - try: - net_config_yaml = yaml.safe_load(net_config_str) - except ValueError: - net_config_yaml = yaml.safe_load("{%s}" % net_config_str) - - if 'network_config' not in net_config_yaml: - msg = ('Unsupported data format in net_config_override ' - 'file %s: %s' % (data_file, net_config_str)) - LOG.error(msg) - raise exceptions.DeploymentError(msg) - - env_data['UndercloudNetConfigOverride'] = net_config_yaml - - params_file = os.path.join(tempdir, 'undercloud_parameters.yaml') - utils.write_env_file(env_data, params_file, registry_overwrites) - deploy_args += ['-e', params_file] - - if CONF.get('hieradata_override'): - data_file = CONF['hieradata_override'] - if os.path.abspath(data_file) != data_file: - data_file = os.path.join(USER_HOME, data_file) - - if not os.path.exists(data_file): - msg = _("Could not find hieradata_override file '%s'") % data_file - LOG.error(msg) - raise RuntimeError(msg) - - deploy_args += ['--hieradata-override=%s' % data_file] - - if CONF.get('enable_validations') and not no_validations: - undercloud_preflight.check(verbose_level, upgrade, net_config_yaml) - deploy_args += ['-e', os.path.join( - tht_templates, "environments/tripleo-validations.yaml")] - - if inflight: - deploy_args.append('--inflight-validations') - - if reproduce_command: - deploy_args.append('--reproduce-command') - - if disable_container_prepare: - deploy_args.append('--disable-container-prepare') - - if CONF.get('custom_env_files'): - for custom_file in CONF['custom_env_files']: - deploy_args += ['-e', custom_file] - - if verbose_level > 1: - deploy_args.append('--debug') - - deploy_args.append('--log-file=%s' % CONF['undercloud_log_file']) - - # Always add a drop-in for the ephemeral undercloud heat stack - # virtual state tracking (the actual file will be created later) - stack_vstate_dropin = os.path.join( - tht_templates, 'undercloud-stack-vstate-dropin.yaml') - deploy_args += ["-e", stack_vstate_dropin] - if force_stack_update: - deploy_args += ["--force-stack-update"] - - cmd = ["sudo", "--preserve-env", "openstack", "tripleo", "deploy", - "--standalone-role", "Undercloud", "--stack", - "undercloud"] - cmd += deploy_args[:] - - # In dry-run, also report the expected heat stack virtual 
state/action - if dry_run: - stack_update_mark = os.path.join( - constants.STANDALONE_EPHEMERAL_STACK_VSTATE, - 'update_mark_undercloud') - if os.path.isfile(stack_update_mark) or force_stack_update: - LOG.warning(_('The heat stack undercloud virtual state/action ' - 'would be UPDATE')) - - return cmd - - -def _get_tls_endpoint_environment(public_host, tht_templates): - try: - netaddr.IPAddress(public_host) - return os.path.join(tht_templates, - "environments/ssl/tls-endpoints-public-ip.yaml") - except netaddr.core.AddrFormatError: - return os.path.join(tht_templates, - "environments/ssl/tls-endpoints-public-dns.yaml") - - -def _get_public_tls_parameters(service_certificate_path): - with open(service_certificate_path, "rb") as pem_file: - pem_data = pem_file.read() - cert = x509.load_pem_x509_certificate(pem_data, default_backend()) - private_key = serialization.load_pem_private_key( - pem_data, - password=None, - backend=default_backend()) - - key_pem = private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption()) - cert_pem = cert.public_bytes(serialization.Encoding.PEM) - return { - 'SSLCertificate': cert_pem, - 'SSLKey': key_pem - } - - -def _container_images_config(conf, deploy_args, env_data, tempdir): - if conf.container_images_file: - deploy_args += ['-e', conf.container_images_file] - try: - shutil.copy(os.path.abspath(conf.container_images_file), tempdir) - except Exception: - msg = _('Cannot copy a container images ' - 'file %s into a tempdir!') % conf.container_images_file - LOG.error(msg) - raise exceptions.DeploymentError(msg) - else: - # No images file was provided. Set a default ContainerImagePrepare - # parameter to trigger the preparation of the required container list - cip = kolla_builder.CONTAINER_IMAGE_PREPARE_PARAM - env_data['ContainerImagePrepare'] = cip diff --git a/tripleoclient/v1/undercloud_preflight.py b/tripleoclient/v1/undercloud_preflight.py deleted file mode 100644 index cdf05984e..000000000 --- a/tripleoclient/v1/undercloud_preflight.py +++ /dev/null @@ -1,559 +0,0 @@ -# Copyright 2017 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import netaddr -import netifaces -import os -import subprocess -import sys -import yaml - -from osc_lib.i18n import _ - -from oslo_utils import netutils -import psutil - -from oslo_config import cfg - -from tripleoclient import constants -from tripleoclient import utils -from tripleo_common.inventory import TripleoInventory - - -class FailedValidation(Exception): - pass - - -CONF = cfg.CONF - -# We need 8 GB, leave a little room for variation in what 8 GB means on -# different platforms. 
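The REQUIRED_MB constant that follows feeds _check_memory further down, which sums RAM and swap and compares the total, in MB, against this padded 8 GB floor. A runnable sketch of the same arithmetic:

    # Sketch of the comparison performed by _check_memory below.
    import psutil

    REQUIRED_MB = 7680  # 8 GB minus headroom for platform variation
    mem = psutil.virtual_memory()
    swap = psutil.swap_memory()
    total_mb = (mem.total + swap.total) / 1024 / 1024
    print(total_mb >= REQUIRED_MB)
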
-REQUIRED_MB = 7680 - -LOG = logging.getLogger(__name__ + ".UndercloudSetup") - - -def _run_live_command(args, env=None, name=None, cwd=None, wait=True): - """Run the command defined by args, env and cwd - - Either returns the process handle immediately (wait=False) or runs the - process to completion, logging its output while the process is still - running. - """ - if name is None: - name = args[0] - process = subprocess.Popen(args, env=env, cwd=cwd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) - if not wait: - return process - - while True: - line = process.stdout.readline() - if line: - LOG.info(line.rstrip()) - if line == '' and process.poll() is not None: - break - if process.returncode != 0: - message = '%s failed. See log for details.' % name - LOG.error(message) - raise RuntimeError(message) - - -def _run_validations(upgrade=False): - """Run tripleo-validations playbooks - - This runs simple ansible playbooks located in tripleo-validations. - There are currently three playbooks: - - undercloud-disk-space.yaml - - undercloud-disk-space-pre-upgrade.yaml - - undercloud-disabled-services.yaml - The first checks minimal disk space for a brand new deploy. - The second checks minimal disk space for an upgrade. - The third checks for services that should be disabled on the undercloud. - """ - if upgrade: - playbook_args = constants.DEPLOY_ANSIBLE_ACTIONS['preflight-upgrade'] - else: - playbook_args = constants.DEPLOY_ANSIBLE_ACTIONS['preflight-deploy'] - - undercloud_hosts_file = os.path.join(constants.CLOUD_HOME_DIR, - 'undercloud_ansible_hosts.yaml') - undercloud_inventory = TripleoInventory() - undercloud_inventory.write_static_inventory(undercloud_hosts_file) - - args = ['validation', 'run', '-i', undercloud_hosts_file, '--validation', - ','.join(playbook_args['playbooks'])] - _run_live_command(args) - - -def _check_memory(): - """Check system memory - - The undercloud will not run properly in less than 8 GB of memory. - This function verifies that at least that much is available before - proceeding with install. - """ - mem = psutil.virtual_memory() - swap = psutil.swap_memory() - total_mb = (mem.total + swap.total) / 1024 / 1024 - if total_mb < REQUIRED_MB: - LOG.error(_('At least {0} MB of memory is required for undercloud ' - 'installation. A minimum of 8 GB is recommended. ' - 'Only detected {1} MB').format(REQUIRED_MB, total_mb)) - raise RuntimeError(_('Insufficient memory available')) - - -def _check_ipv6_enabled(): - """Test if IPv6 is enabled - - If /proc/net/if_inet6 exists, ipv6 sysctl settings are available. - """ - return os.path.isfile('/proc/net/if_inet6') - - -def _wrap_ipv6(ip): - """Wrap an IP address in square brackets if IPv6 - - """ - if netutils.is_valid_ipv6(ip): - return "[%s]" % ip - return ip - - -def _check_sysctl(): - """Check sysctl option availability - - The undercloud will not install properly if some of the expected sysctl - values are not available to be set. - """ - options = ['net.ipv4.ip_forward', 'net.ipv4.ip_nonlocal_bind'] - if _check_ipv6_enabled(): - options.append('net.ipv6.ip_nonlocal_bind') - - not_available = [] - for option in options: - path = '/proc/sys/{opt}'.format(opt=option.replace('.', '/')) - if not os.path.isfile(path): - not_available.append(option) - - if not_available: - LOG.error(_('Required sysctl options are not available. Check ' - 'that your kernel is up to date. 
Missing: {options}') - .format(options=", ".join(not_available))) - raise RuntimeError(_('Missing sysctl options')) - - -def _validate_ips(): - def is_ip(value, param_name): - try: - netaddr.IPAddress(value) - except netaddr.core.AddrFormatError: - msg = (_('{0} "{1}" must be a valid IP address') - .format(param_name, value)) - LOG.error(msg) - raise FailedValidation(msg) - for ip in CONF.undercloud_nameservers: - is_ip(ip, 'undercloud_nameservers') - - -def _validate_value_formats(): - """Validate format of some values - - Certain values have a specific format that must be maintained in order to - work properly. For example, local_ip must be in CIDR form, and the - hostname must be a FQDN. - """ - try: - local_ip = netaddr.IPNetwork(CONF.local_ip) - if local_ip.prefixlen == 32: - LOG.error(_('Invalid netmask')) - raise netaddr.AddrFormatError(_('Invalid netmask')) - # If IPv6 the ctlplane network uses the EUI-64 address format, - # which requires the prefix to be /64 - if local_ip.version == 6 and local_ip.prefixlen != 64: - LOG.error(_('Prefix must be 64 for IPv6')) - raise netaddr.AddrFormatError(_('Prefix must be 64 for IPv6')) - except netaddr.core.AddrFormatError as e: - message = (_('local_ip "{0}" not valid: "{1}" ' - 'Value must be in CIDR format.') - .format(CONF.local_ip, str(e))) - LOG.error(message) - raise FailedValidation(message) - hostname = CONF['undercloud_hostname'] - if hostname is not None and '.' not in hostname: - message = (_('Hostname "%s" is not fully qualified.') % hostname) - LOG.error(message) - raise FailedValidation(message) - - -def _validate_in_cidr(subnet_props, subnet_name): - cidr = netaddr.IPNetwork(subnet_props.cidr) - - def validate_addr_in_cidr(addr, pretty_name=None, require_ip=True, - log_only=False): - try: - if netaddr.IPAddress(addr) not in cidr: - message = (_('Config option {0} "{1}" not in defined ' - 'CIDR "{2}"').format(pretty_name, addr, cidr)) - if log_only: - LOG.warning(message) - else: - LOG.error(message) - raise FailedValidation(message) - except netaddr.core.AddrFormatError: - if require_ip: - message = (_('Invalid IP address: %s') % addr) - LOG.error(message) - raise FailedValidation(message) - - validate_addr_in_cidr(subnet_props.gateway, 'gateway') - # NOTE(hjensas): Ignore the default dhcp_start and dhcp_end if cidr is not - # the default as well. I.e. allow not specifying dhcp_start and dhcp_end. - if not (subnet_props.cidr != constants.CTLPLANE_CIDR_DEFAULT and - subnet_props.dhcp_start == constants.CTLPLANE_DHCP_START_DEFAULT - and subnet_props.dhcp_end == constants.CTLPLANE_DHCP_END_DEFAULT): - for start in subnet_props.dhcp_start: - validate_addr_in_cidr(start, 'dhcp_start') - for end in subnet_props.dhcp_end: - validate_addr_in_cidr(end, 'dhcp_end') - if subnet_name == CONF.local_subnet: - validate_addr_in_cidr(str(netaddr.IPNetwork(CONF.local_ip).ip), - 'local_ip') - if (CONF.undercloud_service_certificate or - CONF.generate_service_certificate): - validate_addr_in_cidr(CONF['undercloud_public_host'], - 'undercloud_public_host', - require_ip=False, log_only=True) - validate_addr_in_cidr(CONF['undercloud_admin_host'], - 'undercloud_admin_host', - require_ip=False) - - -def _validate_dhcp_range(subnet_props, subnet_name): - len_dhcp_start = len(subnet_props.dhcp_start) - len_dhcp_end = len(subnet_props.dhcp_end) - if ((len_dhcp_start > 1 or len_dhcp_end > 1) and - len_dhcp_start != len_dhcp_end): - message = (_('Number of elements in dhcp_start and dhcp_end must be ' - 'identical. 
Subnet "{0}" have "{1}" dhcp_start elements ' - 'and "{2}" dhcp_end elements.').format(subnet_name, - len_dhcp_start, - len_dhcp_end)) - LOG.error(message) - raise FailedValidation(message) - for a, b in zip(subnet_props.dhcp_start, subnet_props.dhcp_end): - start = netaddr.IPAddress(a) - end = netaddr.IPAddress(b) - if start >= end: - message = (_('Invalid dhcp range specified, dhcp_start "{0}" does ' - 'not come before dhcp_end "{1}"').format(start, end)) - LOG.error(message) - raise FailedValidation(message) - - -def _validate_inspection_range(subnet_props): - start = netaddr.IPAddress(subnet_props.inspection_iprange.split(',')[0]) - end = netaddr.IPAddress(subnet_props.inspection_iprange.split(',')[1]) - if start >= end: - message = (_('Invalid inspection range specified, inspection_iprange ' - '"{0}" does not come before "{1}"').format(start, end)) - LOG.error(message) - raise FailedValidation(message) - - -def _validate_interface_exists(config_var='local_interface'): - """Validate the provided local interface exists""" - if (not CONF.net_config_override - and CONF.get(config_var) not in netifaces.interfaces()): - message = (_('Invalid {0} specified. ' - '{1} is not available.').format(config_var, - CONF.get(config_var))) - LOG.error(message) - raise FailedValidation(message) - - -def _validate_no_ip_change(net_config_yaml=None): - """Disallow provisioning interface IP changes - - Changing the provisioning network IP causes a number of issues, so we - need to disallow it early in the install before configurations start to - be changed. - """ - if CONF.net_config_override: - os_net_config_file = net_config_yaml - else: - os_net_config_file = '/etc/os-net-config/config.yaml' - - if not os.path.isfile( - os.path.expanduser(os_net_config_file)): - os_net_config_file = '/etc/os-net-config/config.json' - # Nothing to do if we haven't already installed - if not os.path.isfile(os.path.expanduser(os_net_config_file)): - return - try: - with open(os_net_config_file, 'r') as f: - network_config = yaml.safe_load(f) - ctlplane = [i for i in network_config.get('network_config', []) - if i.get('name') == 'br-ctlplane'][0] - - except ValueError: - # File was empty - return - except IndexError: - # Nothing to check if br-ctlplane wasn't configured - return - existing_ip = ctlplane['addresses'][0]['ip_netmask'] - conf_netaddr = netaddr.IPNetwork(CONF.local_ip) - existing_netaddr = netaddr.IPNetwork(existing_ip) - if (conf_netaddr != existing_netaddr - or conf_netaddr.ip != existing_netaddr.ip): - message = _('Changing the local_ip is not allowed. Existing IP: ' - '{0}, Configured IP: {1}').format( - existing_ip, CONF.local_ip) - LOG.error(message) - raise FailedValidation(message) - - -def _validate_passwords_file(): - """Disallow updates if the passwords file is missing - - If the undercloud was already deployed, the passwords file needs to be - present so passwords that can't be changed are persisted. If the file - is missing it will break the undercloud, so we should fail-fast and let - the user know about the problem. Both the old and new path to the file - is checked. If either is found, the validation will pass as the old - path will be migrated to the new during and update/upgrade. 
- """ - old_passwd_path = None - file_name = 'tripleo-undercloud-passwords.yaml' - - output_dir = utils.get_output_dir(CONF.get('output_dir')) - - if CONF.get('output_dir'): - old_passwd_path = os.path.join(CONF.get('output_dir'), file_name) - - # If old_passwd_path is not yet set, then default to the old path - if not old_passwd_path: - old_passwd_path = os.path.join(constants.CLOUD_HOME_DIR, file_name) - - passwd_path = os.path.join(output_dir, file_name) - - if (os.path.isfile(os.path.join(constants.CLOUD_HOME_DIR, 'stackrc')) and - not (os.path.isfile(passwd_path) or - os.path.isfile(old_passwd_path))): - message = (_('The %s file is missing. This will cause all service ' - 'passwords to change and break the existing ' - 'undercloud. ') % passwd_path) - LOG.error(message) - raise FailedValidation(message) - - -def _validate_env_files_paths(): - """Verify the non-matching templates path vs env files paths""" - tht_path = CONF.get('templates') or constants.TRIPLEO_HEAT_TEMPLATES - roles_file = utils.rel_or_abs_path( - CONF.get('roles_file') or constants.UNDERCLOUD_ROLES_FILE, - tht_path) - - # get the list of jinja templates normally rendered for UC installations - LOG.debug(_("Using roles file {0} from {1}").format(roles_file, tht_path)) - process_templates = os.path.join(tht_path, - 'tools/process-templates.py') - python_interpreter = "/usr/bin/python{}".format(sys.version_info[0]) - p = _run_live_command( - [python_interpreter, process_templates, '--roles-data', roles_file, - '--dry-run'], - name='process-templates-dry-run', cwd=tht_path, wait=False) - - # parse the list for the rendered from j2 file names - result = p.communicate()[0] - j2_files_list = [] - for line in result.split("\n"): - if ((line.startswith('dry run') or line.startswith('jinja2')) and - line.endswith('.yaml')): - bname = os.path.basename(line.split(' ')[-1]) - if line.startswith('dry run'): - j2_files_list.append(bname) - if line.startswith('jinja2'): - j2_files_list.append(bname.replace('.j2', '')) - - for env_file in CONF['custom_env_files']: - env_file_abs = os.path.abspath(env_file) - if (os.path.dirname(env_file_abs) != os.path.abspath(tht_path) and - os.path.basename(env_file) in j2_files_list): - msg = _( - 'Heat environment external to the templates dir ' - 'can not reference j2 processed file %s') % env_file_abs - LOG.error(msg) - raise FailedValidation(msg) - - -def _run_yum_clean_all(instack_env): - args = ['sudo', 'yum', 'clean', 'all'] - LOG.info('Running yum clean all') - _run_live_command(args, instack_env, 'yum-clean-all') - LOG.info(_('yum-clean-all completed successfully')) - - -def _run_yum_update(instack_env): - args = ['sudo', 'yum', 'update', '-y'] - LOG.info('Running yum update') - _run_live_command(args, instack_env, 'yum-update') - LOG.info(_('yum-update completed successfully')) - - -def _validate_architecure_options(): - def error_handler(message): - LOG.error(_('Undercloud configuration validation failed: %s'), message) - raise FailedValidation(message) - - def _validate_additional_architectures(error_callback): - for arch in CONF['additional_architectures']: - if arch not in constants.ADDITIONAL_ARCHITECTURES: - params = {'architecture': arch, - 'all_architectures': - ' '.join(constants.ADDITIONAL_ARCHITECTURES) - } - error_callback(_('additional_architectures "%(architecture)s" ' - 'must be in the supported architecture list: ' - '%(all_architectures)s') % params) - - _validate_additional_architectures(error_handler) - - -def _checking_status(item): - LOG.info(_('Checking %s...') % 
item) - - -def _check_routed_networks_enabled_if_multiple_subnets_defined(): - if (len(CONF.subnets) > 1 and not CONF.enable_routed_networks): - msg = _('Multiple subnets specified: %s but routed networks are not ' - 'enabled.') % CONF.subnets - LOG.error(msg) - raise FailedValidation(msg) - - -def _validate_deprecetad_now_invalid_parameters(): - invalid_opts = [ - 'masquerade_network', - ] - deprecate_conf = cfg.CONF - invalid_opts_used = [] - - for invalid_opt in invalid_opts: - deprecate_conf.register_opts([cfg.StrOpt(invalid_opt)]) - if deprecate_conf.get(invalid_opt): - invalid_opts_used.append(invalid_opt) - if invalid_opts_used: - msg = _('Options that have been deprecated and removed/replaced were ' - 'detected. Invalid options: %s') % invalid_opts_used - LOG.error(msg) - raise FailedValidation(msg) - del deprecate_conf - - -def _validate_dnsnameservers(s): - ip_version = netaddr.IPNetwork(s['cidr']).version - if s['dns_nameservers']: - nameservers = s['dns_nameservers'] - else: - nameservers = CONF.undercloud_nameservers - - for nameserver in nameservers: - if not netaddr.IPAddress(nameserver).version == ip_version: - message = (_('IP version mismatch. Nameserver {0} is not valid ' - 'for subnet {1}').format(nameserver, s['cidr'])) - LOG.error(message) - raise FailedValidation(message) - - -def _check_all_or_no_subnets_use_dns_nameservers(): - x = [CONF.get(s).get('dns_nameservers') for s in CONF.subnets] - if any(([len(y) == 0 for y in x])) and any(([len(y) > 0 for y in x])): - message = (_('Option dns_nameservers is defined for subnets: {0}. ' - 'Option dns_nameservers is also required for subnets: ' - '{1}.').format( - ', '.join([s for s in CONF.subnets if - CONF.get(s).get('dns_nameservers')]), - ', '.join([s for s in CONF.subnets if - not CONF.get(s).get('dns_nameservers')]))) - LOG.error(message) - raise FailedValidation(message) - - -def check(verbose_level, upgrade=False, net_config_yaml=None): - # Fetch configuration and use its log file param to add logging to a file - utils.load_config(CONF, constants.UNDERCLOUD_CONF_PATH) - utils.configure_logging(LOG, verbose_level, CONF['undercloud_log_file']) - - # data = {opt.name: CONF[opt.name] for opt in _opts} - try: - # Other validations - _checking_status('Hostname') - utils.check_hostname() - _checking_status('Memory') - _check_memory() - _checking_status('Disk space') - _run_validations(upgrade) - _checking_status('Sysctl') - _check_sysctl() - _checking_status('Password file') - _validate_passwords_file() - _checking_status('Deprecated now invalid options') - _validate_deprecetad_now_invalid_parameters() - # Heat templates validations - if CONF.get('custom_env_files'): - _checking_status('Custom env file') - _validate_env_files_paths() - # Networking validations - _checking_status('Networking values') - _validate_value_formats() - _check_routed_networks_enabled_if_multiple_subnets_defined() - _check_all_or_no_subnets_use_dns_nameservers() - for subnet in CONF.subnets: - s = CONF.get(subnet) - _checking_status('Subnet "%s" is in CIDR' % subnet) - _validate_in_cidr(s, subnet) - _checking_status('DHCP range is in subnet "%s"' % subnet) - _validate_dhcp_range(s, subnet) - _checking_status('Inspection range for subnet "%s"' % subnet) - _validate_inspection_range(s) - _validate_dnsnameservers(s) - _checking_status('IP addresses') - _validate_ips() - _checking_status('Network interfaces') - _validate_interface_exists() - _checking_status('Provisioning IP change') - _validate_no_ip_change(net_config_yaml) - 
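For reference, the per-subnet DHCP ordering rule enforced by _validate_dhcp_range above reduces to a pairwise netaddr comparison. A minimal sketch with assumed example ranges:

    # Sketch of the pairwise dhcp_start/dhcp_end ordering check; values are examples.
    import netaddr

    dhcp_start = ['192.168.24.5', '192.168.24.100']
    dhcp_end = ['192.168.24.50', '192.168.24.150']
    for a, b in zip(dhcp_start, dhcp_end):
        assert netaddr.IPAddress(a) < netaddr.IPAddress(b), (a, b)
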
_checking_status('Architecture') - _validate_architecure_options() - except KeyError as e: - LOG.error(_('Key error in configuration: {error}\n' - 'Value is missing in configuration.').format(error=e)) - sys.exit(1) - except FailedValidation as e: - LOG.error(_('An error occurred during configuration ' - 'validation, please check your host ' - 'configuration and try again.\nError ' - 'message: {error}').format(error=e)) - sys.exit(1) - except RuntimeError as e: - LOG.error(_('An error occurred during configuration ' - 'validation, please check your host ' - 'configuration and try again. Error ' - 'message: {error}').format(error=e)) - sys.exit(1) diff --git a/tripleoclient/v2/__init__.py b/tripleoclient/v2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/v2/overcloud_ceph.py b/tripleoclient/v2/overcloud_ceph.py deleted file mode 100644 index c6b8f6d04..000000000 --- a/tripleoclient/v2/overcloud_ceph.py +++ /dev/null @@ -1,1148 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import logging -import os -import re -import uuid -import yaml - -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleo_common.utils import passwords - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils - - -def arg_parse_common(parser): - """Multiple classes below need these arguments added - """ - parser.add_argument('--cephadm-ssh-user', dest='cephadm_ssh_user', - help=_("Name of the SSH user used by cephadm. " - "Warning: if this option is used, it " - "must be used consistently for every " - "'openstack overcloud ceph' call. " - "Defaults to 'ceph-admin'. " - "(default=Env: CEPHADM_SSH_USER)"), - default=utils.env("CEPHADM_SSH_USER", - default="ceph-admin")) - - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument( - '--working-dir', action='store', - help=_('The working directory for the deployment where all ' - 'input, output, and generated files will be stored.\n' - 'Defaults to "$HOME/overcloud-deploy/"')) - - return parser - - -def ceph_hosts_in_inventory(ceph_hosts, ceph_spec, inventory): - """Raise command error if any ceph_hosts are not in the inventory - """ - all_hosts = oooutils.parse_ansible_inventory(inventory) - for ceph_host in ceph_hosts['_admin'] + ceph_hosts['non_admin']: - if ceph_host not in all_hosts: - raise oscexc.CommandError( - "Ceph host '%s' from Ceph spec '%s' was " - "not found in Ansible inventory '%s' so " - "unable to modify that host via Ansible." 
- % (ceph_host, ceph_spec, inventory)) - - -class OvercloudCephDeploy(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudCephDeploy") - auth_required = False - - def get_parser(self, prog_name): - parser = super(OvercloudCephDeploy, self).get_parser(prog_name) - - parser.add_argument('baremetal_env', nargs='?', - metavar='', - help=_('Path to the environment file ' - 'output from "openstack ' - 'overcloud node provision". ' - 'This argument may be excluded ' - 'only if --ceph-spec is used.')) - parser.add_argument('-o', '--output', required=True, - metavar='', - help=_('The path to the output environment ' - 'file describing the Ceph deployment ' - ' to pass to the overcloud deployment.')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt before overwriting an ' - 'existing output file ' - '(assume yes).')) - parser.add_argument('--skip-user-create', default=False, - action='store_true', - help=_("Do not create the cephadm SSH user. " - "This user is necessary to deploy but " - "may be created in a separate step via " - "'openstack overcloud ceph user enable'.")) - parser.add_argument('--skip-hosts-config', default=False, - action='store_true', - help=_("Do not update /etc/hosts on deployed " - "servers. By default this is configured " - "so overcloud nodes can reach each other " - "and the undercloud by name.")) - parser.add_argument('--skip-container-registry-config', default=False, - action='store_true', - help=_("Do not update " - "/etc/containers/registries.conf on " - "deployed servers. By default this is " - "configured so overcloud nodes can pull " - "containers from the undercloud registry.")) - parser = arg_parse_common(parser) - parser.add_argument('--roles-data', - help=_( - "Path to an alternative roles_data.yaml. " - "Used to decide which node gets which " - "Ceph mon, mgr, or osd service " - "based on the node's role in " - "."), - default=os.path.join( - constants.TRIPLEO_HEAT_TEMPLATES, - constants.OVERCLOUD_ROLES_FILE)) - parser.add_argument('--network-data', - help=_( - "Path to an alternative network_data.yaml. " - "Used to define Ceph public_network and " - "cluster_network. This file is searched " - "for networks with name_lower values of " - "storage and storage_mgmt. If none found, " - "then search repeats but with " - "service_net_map_replace in place of " - "name_lower. Use --public-network-name or " - "--cluster-network-name options to override " - "name of the searched for network from " - "storage or storage_mgmt to a customized " - "name. If network_data has no storage " - "networks, both default to ctlplane. " - "If found network has >1 subnet, they are " - "all combined (for routed traffic). " - "If a network has ipv6 true, then " - "the ipv6_subnet is retrieved instead " - "of the ip_subnet, and the Ceph global " - "ms_bind_ipv4 is set false and the " - "ms_bind_ipv6 is set true. Use --config " - "to override these defaults if desired."), - default=os.path.join( - constants.TRIPLEO_HEAT_TEMPLATES, - constants.OVERCLOUD_NETWORKS_FILE)) - parser.add_argument('--public-network-name', - help=_( - "Name of the network defined in " - "network_data.yaml which should be " - "used for the Ceph public_network. " - "Defaults to 'storage'."), - default='storage') - parser.add_argument('--cluster-network-name', - help=_( - "Name of the network defined in " - "network_data.yaml which should be " - "used for the Ceph cluster_network. 
" - "Defaults to 'storage_mgmt'."), - default='storage_mgmt') - parser.add_argument('--cluster', - help=_( - "Name of the Ceph cluster. " - "If set to 'foo', then the files " - "/etc/ceph//foo.conf and " - "/etc/ceph//foo.client.admin.keyring " - "will be created. Otherwise these " - "files will use the name 'ceph'. " - "Changing this means changing command line " - "calls too, e.g. 'ceph health' will become " - "'ceph --cluster foo health' unless export " - "CEPH_ARGS='--cluster foo' is used."), - default='ceph') - parser.add_argument('--mon-ip', - help=_( - "IP address of the first Ceph monitor. " - "If not set, an IP from the Ceph " - "public_network of a server with the " - "mon label from the Ceph spec is used. " - "IP must already be active on server."), - default='') - parser.add_argument('--config', - help=_( - "Path to an existing ceph.conf with settings " - "to be assimilated by the new cluster via " - "'cephadm bootstrap --config' ")), - parser.add_argument('--cephadm-extra-args', - help=_( - "String of extra parameters to pass cephadm. " - "E.g. if --cephadm-extra-args '--log-to-file " - " --skip-prepare-host', then cephadm boostrap " - "will use those options. Warning: requires " - "--force as not all possible options ensure a " - "functional deployment.")), - parser.add_argument('--force', default=False, - action='store_true', - help=_("Run command regardless of consequences.")) - parser.add_argument('--ansible-extra-vars', - help=_( - "Path to an existing Ansible vars file which " - "can override any variable in " - "tripleo-ansible. If " - "'--ansible-extra-vars vars.yaml' is passed, " - "then 'ansible-playbook -e @vars.yaml ...' is " - "used to call tripleo-ansible Ceph roles. " - "Warning: requires --force as not all " - "options ensure a functional deployment.")) - parser.add_argument('--ceph-client-username', - help=_( - "Name of the cephx user. E.g. if " - "'openstack' is used, then " - "'ceph auth get client.openstack' will " - "return a working user with key and " - "capabilities on the deployed Ceph cluster. " - "Ignored unless tripleo_cephadm_pools is set " - "via --ansible-extra-vars. " - "If this parameter is not set and " - "tripleo_cephadm_keys is set via " - "--ansible-extra-vars, then " - "'openstack' will be used. " - "Used to set CephClientUserName in --output."), - default='openstack'), - parser.add_argument('--ceph-client-key', - help=_( - "Value of the cephx key. E.g. " - "'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='. " - "Ignored unless tripleo_cephadm_pools is set " - "via --ansible-extra-vars. " - "If this parameter is not set and " - "tripleo_cephadm_keys is set via " - "--ansible-extra-vars, then a random " - "key will be generated. " - "Used to set CephClientKey in --output."), - default='') - parser.add_argument('--skip-cephx-keys', default=False, - action='store_true', - help=_("Do not create cephx keys even if " - "tripleo_cephadm_pools is set via " - "--ansible-extra-vars. 
If this option " - "is used, then even the defaults of " - "--ceph-client-key and " - "--ceph-client-username are ignored, " - "but the pools defined via " - "--ansible-extra-vars " - "are still created.")) - parser.add_argument('--ceph-vip', - help=_( - "Path to an existing Ceph services/network " - "mapping file."), - default=None), - parser.add_argument('--daemons', - help=_( - "Path to an existing Ceph daemon options " - "definition."), - default=None), - parser.add_argument('--single-host-defaults', default=False, - action='store_true', - help=_("Adjust configuration defaults to suit " - "a single-host Ceph cluster.")) - spec_group = parser.add_mutually_exclusive_group() - spec_group.add_argument('--ceph-spec', - help=_( - "Path to an existing Ceph spec file. If " - "not provided a spec will be generated " - "automatically based on --roles-data and " - ". The " - " parameter is " - "optional only if --ceph-spec is used."), - default=None) - spec_group.add_argument('--osd-spec', - help=_( - "Path to an existing OSD spec file. " - "Mutually exclusive with --ceph-spec. " - "If the Ceph spec file is generated " - "automatically, then the OSD spec " - "in the Ceph spec file defaults to " - "{data_devices: {all: true}} " - "for all service_type osd. " - "Use --osd-spec to override the " - "data_devices value inside the " - "Ceph spec file."), - default=None) - parser.add_argument('--crush-hierarchy', - help=_( - "Path to an existing crush hierarchy spec " - "file. "), - default=None) - parser.add_argument('--standalone', default=False, - action='store_true', - help=_("Use single host Ansible inventory. " - "Used only for development or testing " - "environments.")) - parser.add_argument('--container-image-prepare', - help=_( - "Path to an alternative " - "container_image_prepare_defaults.yaml. " - "Used to control which Ceph container is " - "pulled by cephadm via the ceph_namespace, " - "ceph_image, and ceph_tag variables in " - "addition to registry authentication via " - "ContainerImageRegistryCredentials." - ), - default=None) - parser.add_argument('--cephadm-default-container', default=False, - action='store_true', - help=_("Use the default continer defined in " - "cephadm instead of " - "container_image_prepare_defaults.yaml. " - "If this is used, 'cephadm bootstrap' is " - "not passed the --image parameter.")) - container_group = parser.add_argument_group("container-image-prepare " - "overrides", - "The following options " - "may be used to override " - "individual values " - "set via " - "--container-image-prepare" - ". If the example " - "variables below were " - "set the image would be " - "concatenated into " - "quay.io/ceph/ceph:latest " - "and a custom registry " - "login would be used." - ) - container_group.add_argument('--container-namespace', - required=False, - help='e.g. quay.io/ceph') - container_group.add_argument('--container-image', - required=False, - help='e.g. ceph') - container_group.add_argument('--container-tag', - required=False, - help='e.g. 
latest') - container_group.add_argument('--registry-url', - required=False, - help='') - container_group.add_argument('--registry-username', - required=False, - help='') - container_group.add_argument('--registry-password', - required=False, - help='') - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - output_path = os.path.abspath(parsed_args.output) - overwrite = parsed_args.yes - ceph_ingress = True - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' % parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s. See the --yes parameter to " - "override this behavior. " % - parsed_args.output) - else: - overwrite = True - - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - working_dir = os.path.abspath(parsed_args.working_dir) - oooutils.makedirs(working_dir) - - if parsed_args.standalone: - inventory = oooutils.standalone_ceph_inventory(working_dir) - ceph_ingress = False - else: - inventory = os.path.join(working_dir, - constants.TRIPLEO_STATIC_INVENTORY) - if not os.path.exists(inventory): - raise oscexc.CommandError( - "Inventory file not found in working directory: " - "%s. It should have been created by " - "'openstack overcloud node provision'." - % inventory) - - # mandatory extra_vars are now set, add others conditionally - extra_vars = { - "deployed_ceph_tht_path": output_path, - "working_dir": working_dir, - "stack_name": parsed_args.stack, - "tripleo_cephadm_standalone": parsed_args.standalone, - "tripleo_cephadm_ingress": ceph_ingress, - "tripleo_ceph_client_vars": os.path.join(working_dir, - 'ceph_client.yml') - } - extra_vars_file = None - # optional paths to pass to playbook - if parsed_args.ceph_spec is None and \ - parsed_args.baremetal_env is None: - raise oscexc.CommandError( - "Either " - "or --ceph-spec must be used.") - - if parsed_args.baremetal_env: - baremetal_env_path = os.path.abspath(parsed_args.baremetal_env) - if not os.path.exists(baremetal_env_path): - raise oscexc.CommandError( - "Baremetal environment file does not exist:" - " %s" % parsed_args.baremetal_env) - else: - extra_vars['baremetal_deployed_path'] = \ - os.path.abspath(parsed_args.baremetal_env) - - if parsed_args.roles_data: - if not os.path.exists(parsed_args.roles_data): - raise oscexc.CommandError( - "Roles Data file not found --roles-data %s." - % os.path.abspath(parsed_args.roles_data)) - else: - extra_vars['tripleo_roles_path'] = \ - os.path.abspath(parsed_args.roles_data) - - if parsed_args.config: - if not os.path.exists(parsed_args.config): - raise oscexc.CommandError( - "Config file not found --config %s." - % os.path.abspath(parsed_args.config)) - else: - extra_vars['tripleo_cephadm_bootstrap_conf'] = \ - os.path.abspath(parsed_args.config) - - if parsed_args.network_data: - if not os.path.exists(parsed_args.network_data): - raise oscexc.CommandError( - "Network Data file not found --network-data %s." 
- % os.path.abspath(parsed_args.network_data)) - - ceph_networks_map = \ - oooutils.get_ceph_networks(parsed_args.network_data, - parsed_args.public_network_name, - parsed_args.cluster_network_name) - extra_vars = {**extra_vars, **ceph_networks_map} - - if parsed_args.cluster: - extra_vars['tripleo_cephadm_cluster'] = \ - parsed_args.cluster - - if parsed_args.mon_ip: - if not oooutils.is_valid_ip(parsed_args.mon_ip): - raise oscexc.CommandError( - "Invalid IP address '%s' passed to --mon-ip." - % parsed_args.mon_ip) - else: - extra_vars['tripleo_cephadm_first_mon_ip'] = \ - parsed_args.mon_ip - - if parsed_args.ceph_spec: - if not os.path.exists(parsed_args.ceph_spec): - raise oscexc.CommandError( - "Ceph Spec file not found --ceph-spec %s." - % os.path.abspath(parsed_args.ceph_spec)) - else: - extra_vars['dynamic_ceph_spec'] = False - extra_vars['ceph_spec_path'] = \ - os.path.abspath(parsed_args.ceph_spec) - - if parsed_args.osd_spec: - if not os.path.exists(parsed_args.osd_spec): - raise oscexc.CommandError( - "OSD Spec file not found --osd-spec %s." - % os.path.abspath(parsed_args.osd_spec)) - else: - extra_vars['osd_spec_path'] = \ - os.path.abspath(parsed_args.osd_spec) - - if parsed_args.crush_hierarchy: - if not os.path.exists(parsed_args.crush_hierarchy): - raise oscexc.CommandError( - "Crush Hierarchy Spec file not found --crush-hierarchy %s." - % os.path.abspath(parsed_args.crush_hierarchy)) - else: - extra_vars['crush_hierarchy_path'] = \ - os.path.abspath(parsed_args.crush_hierarchy) - if parsed_args.ceph_vip: - if not os.path.exists(parsed_args.ceph_vip): - raise oscexc.CommandError( - "ceph vip mapping file not found --ceph-vip %s." - % os.path.abspath(parsed_args.ceph_vip)) - else: - extra_vars['tripleo_cephadm_ha_services_path'] = \ - os.path.abspath(parsed_args.ceph_vip) - if parsed_args.cephadm_extra_args and not parsed_args.force: - raise oscexc.CommandError( - "--cephadm-extra-args requires --force.") - if parsed_args.cephadm_extra_args and parsed_args.force: - extra_vars['tripleo_cephadm_extra_args'] = \ - parsed_args.cephadm_extra_args - - if parsed_args.ansible_extra_vars and not parsed_args.force: - raise oscexc.CommandError( - "--ansible-extra-vars requires --force.") - if parsed_args.ansible_extra_vars and parsed_args.force: - if not os.path.exists(parsed_args.ansible_extra_vars): - raise oscexc.CommandError( - "Ansible vars file not found --ansible-extra-vars %s." 
- % os.path.abspath(parsed_args.ansible_extra_vars)) - else: - # utils.run_ansible_playbook() assumes extra_vars_file is a dict - with open(os.path.abspath(parsed_args.ansible_extra_vars), - 'r') as f: - extra_vars_file = yaml.safe_load(f) - if (not parsed_args.skip_cephx_keys and - 'tripleo_cephadm_pools' in extra_vars_file and # noqa - 'tripleo_cephadm_keys' not in extra_vars_file): # noqa - # create tripleo_cephadm_keys and add it to extra_vars - self.log.debug("Generating tripleo_cephadm_keys") - if len(parsed_args.ceph_client_key) == 0: - parsed_args.ceph_client_key = \ - passwords.create_cephx_key() - cephx = re.compile(r"^[a-zA-Z0-9+/]{38}==$") - if not cephx.match(parsed_args.ceph_client_key): - msg = ("'%s' is not a valid cephx key" - % str(parsed_args.ceph_client_key)) - raise oscexc.CommandError(msg) - extra_vars['tripleo_cephadm_keys'] = \ - oooutils.get_tripleo_cephadm_keys( - parsed_args.ceph_client_username, - parsed_args.ceph_client_key, - list(map(lambda x: x.get('name', ''), - extra_vars_file['tripleo_cephadm_pools'])) - ) - # pass CLI args to THT via --output deployed_ceph.yaml - extra_vars['ceph_client_key'] = parsed_args.ceph_client_key - extra_vars['ceph_client_username'] =\ - parsed_args.ceph_client_username - else: - self.log.debug("Not generating tripleo_cephadm_keys. " - "Either --skip-cephx-keys was used, " - "or tripleo_cephadm_pools was not " - "in %s, or tripleo_cephadm_keys was " - "in %s." % ( - (parsed_args.ansible_extra_vars,)*2)) - - if parsed_args.daemons: - if not os.path.exists(parsed_args.daemons): - raise oscexc.CommandError( - "ceph daemon options file not found --daemons %s." - % os.path.abspath(parsed_args.daemons)) - else: - daemon_opt = oooutils.process_ceph_daemons( - os.path.abspath(parsed_args.daemons)) - # merge the processed extra_vars for daemons - extra_vars = {**extra_vars, **daemon_opt} - # optional container vars to pass to playbook - keys = ['ceph_namespace', 'ceph_image', 'ceph_tag'] - push_sub_keys = ['ceph_namespace'] - key = 'ContainerImagePrepare' - container_dict = \ - oooutils.parse_container_image_prepare(key, keys, - parsed_args. - container_image_prepare, - push_sub_keys) - - extra_vars['tripleo_cephadm_container_ns'] = \ - parsed_args.container_namespace or \ - container_dict['ceph_namespace'] - extra_vars['tripleo_cephadm_container_image'] = \ - parsed_args.container_image or \ - container_dict['ceph_image'] - extra_vars['tripleo_cephadm_container_tag'] = \ - parsed_args.container_tag or \ - container_dict['ceph_tag'] - - # optional container registry vars to pass to playbook - if 'tripleo_cephadm_container_ns' in extra_vars: - keys = [extra_vars['tripleo_cephadm_container_ns']] - key = 'ContainerImageRegistryCredentials' - registry_dict = \ - oooutils.parse_container_image_prepare(key, keys, - parsed_args. - container_image_prepare) - # It's valid for the registry_dict to be empty so - # we cannot default to it with an 'or' like we can - # for ceph_{namespace,image,tag} as above. 
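The precedence applied just below is: values parsed from the ContainerImagePrepare data are set only when present (an empty dict is legitimate), and CLI options then override them. A standalone sketch of that layering; the dict contents and option values are assumed examples:

    # Sketch of the set-if-present-then-CLI-override layering below.
    registry_dict = {'registry_url': 'quay.io'}   # may legitimately be empty
    cli_overrides = {'registry_url': None, 'registry_username': 'stack',
                     'registry_password': None}
    extra_vars = {}
    for k in ('registry_url', 'registry_username', 'registry_password'):
        if k in registry_dict:
            extra_vars['tripleo_cephadm_' + k] = registry_dict[k]
        if cli_overrides.get(k):
            extra_vars['tripleo_cephadm_' + k] = cli_overrides[k]
    print(extra_vars)
    # {'tripleo_cephadm_registry_url': 'quay.io',
    #  'tripleo_cephadm_registry_username': 'stack'}
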
- if 'registry_url' in registry_dict: - extra_vars['tripleo_cephadm_registry_url'] = \ - registry_dict['registry_url'] - if 'registry_password' in registry_dict: - extra_vars['tripleo_cephadm_registry_password'] = \ - registry_dict['registry_password'] - if 'registry_username' in registry_dict: - extra_vars['tripleo_cephadm_registry_username'] = \ - registry_dict['registry_username'] - # Whether registry vars came out of --container-image-prepare - # or not, we need either to set them (as above) or override - # them if they were passed via the CLI (as follows) - if parsed_args.registry_url: - extra_vars['tripleo_cephadm_registry_url'] = \ - parsed_args.registry_url - if parsed_args.registry_password: - extra_vars['tripleo_cephadm_registry_password'] = \ - parsed_args.registry_password - if parsed_args.registry_username: - extra_vars['tripleo_cephadm_registry_username'] = \ - parsed_args.registry_username - - if parsed_args.cephadm_ssh_user: - extra_vars["tripleo_cephadm_ssh_user"] = \ - parsed_args.cephadm_ssh_user - - if parsed_args.single_host_defaults: - extra_vars["tripleo_cephadm_single_host_defaults"] = True - - if parsed_args.cephadm_default_container: - extra_vars["tripleo_cephadm_default_container"] = True - - skip_tags = [] - if parsed_args.skip_user_create: - skip_tags.append('cephadm_ssh_user') - - if not parsed_args.skip_hosts_config: - # call playbook to set /etc/hosts - uc_host_list = [oooutils.get_undercloud_host_entry()] - ctlplane_map = oooutils.get_ctlplane_attrs() - short_name = oooutils.get_hostname(short=True) - long_name = oooutils.get_hostname(short=False) - dns_domain = long_name.replace(short_name+'.', '') - hosts_extra_vars = dict( - tripleo_hosts_entries_undercloud_hosts_entries=uc_host_list, - tripleo_hosts_entries_overcloud_hosts_entries=[], - tripleo_hosts_entries_vip_hosts_entries=[], - tripleo_hosts_entries_extra_hosts_entries=[], - tripleo_stack_name=parsed_args.stack, - hostname_resolve_network=ctlplane_map['network']['name'], - cloud_domain=dns_domain, - ) - self.log.debug("Adding undercloud host entry " - "to overcloud /etc/hosts. (%s)" - % hosts_extra_vars) - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-hosts-file-config.yaml', - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - reproduce_command=False, - extra_vars=hosts_extra_vars, - extra_vars_file=extra_vars_file, - ) - else: - self.log.debug("Not updating /etc/hosts because " - "--skip-hosts-config was used.") - - if not parsed_args.skip_container_registry_config and \ - container_dict.get('push_destination_boolean', False): - # call playbook to set /etc/containers/registries.conf - ns = container_dict.get('ceph_namespace', '').partition('/')[0] - if ns: - reg_extra_vars = dict(tripleo_podman_insecure_registries=[ns]) - self.log.debug("Adding %s as a container registry." % ns) - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-container-registry-config.yaml', - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=reg_extra_vars, - extra_vars_file=extra_vars_file, - reproduce_command=False, - ) - else: - self.log.debug("Not updating container registry. " - "ceph_namespace is empty.") - else: - self.log.debug("Not updating container registry. 
" - "Either container_image_prepare_defaults.yaml " - "or file from --container-image-prepare " - "is not setting push_destination. Or " - "--skip-container-registry-config was used.") - - # call playbook to deploy ceph - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-deployed-ceph.yaml', - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - extra_vars_file=extra_vars_file, - reproduce_command=False, - skip_tags=','.join(skip_tags), - ) - - -class OvercloudCephUserDisable(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudCephUserDisable") - auth_required = False - - def get_parser(self, prog_name): - parser = super(OvercloudCephUserDisable, self).get_parser(prog_name) - parser.add_argument('ceph_spec', - metavar='', - help=_( - "Path to an existing Ceph spec file " - "which describes the Ceph cluster " - "where the cephadm SSH user will have " - "their public and private keys removed " - "and cephadm will be disabled. " - "Spec file is necessary to determine " - "which nodes to modify. " - "WARNING: Ceph cluster administration or " - "modification will no longer function.")) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt before disabling ' - 'cephadm and its SSH user. ' - '(assume yes).')) - parser = arg_parse_common(parser) - required = parser.add_argument_group('required named arguments') - required.add_argument('--fsid', - metavar='', required=True, - help=_("The FSID of the Ceph cluster to be " - "disabled. Required for disable option.")) - parser.add_argument('--standalone', default=False, - action='store_true', - help=_("Use single host Ansible inventory. " - "Used only for development or testing " - "environments.")) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - ceph_spec = os.path.abspath(parsed_args.ceph_spec) - - if not os.path.exists(ceph_spec): - raise oscexc.CommandError( - "Ceph spec file does not exist:" - " %s" % parsed_args.ceph_spec) - - overwrite = parsed_args.yes - if (not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Are you sure you want to disable Ceph ' - 'cluster management [y/N]?', - self.log)): - raise oscexc.CommandError("Will not disable cephadm and delete " - "the cephadm SSH user :" - " %s. See the --yes parameter to " - "override this behavior. " % - parsed_args.cephadm_ssh_user) - else: - overwrite = True - - # use stack and working_dir to find inventory - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - working_dir = os.path.abspath(parsed_args.working_dir) - oooutils.makedirs(working_dir) - - if parsed_args.standalone: - inventory = oooutils.standalone_ceph_inventory(working_dir) - else: - inventory = os.path.join(working_dir, - constants.TRIPLEO_STATIC_INVENTORY) - if not os.path.exists(inventory): - raise oscexc.CommandError( - "Inventory file not found in working directory: " - "%s. It should have been created by " - "'openstack overcloud node provision'." - % inventory) - ceph_hosts = oooutils.get_host_groups_from_ceph_spec(ceph_spec) - ceph_hosts_in_inventory(ceph_hosts, ceph_spec, inventory) - - if parsed_args.fsid: - try: - uuid.UUID(parsed_args.fsid) - except ValueError: - raise oscexc.CommandError( - "--fsid %s is not a valid UUID." 
-                    % parsed_args.fsid)
-
-        if parsed_args.fsid:  # if no FSID, then no ceph cluster to disable
-            # call the playbook to toggle cephadm w/ disable
-            # if tripleo_cephadm_backend isn't set it defaults to ''
-            extra_vars = {
-                "tripleo_cephadm_fsid": parsed_args.fsid,
-                "tripleo_cephadm_action": 'disable',
-            }
-            with oooutils.TempDirs() as tmp:
-                oooutils.run_ansible_playbook(
-                    playbook='disable_cephadm.yml',
-                    inventory=inventory,
-                    workdir=tmp,
-                    playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
-                    verbosity=oooutils.playbook_verbosity(self=self),
-                    extra_vars=extra_vars,
-                    limit_hosts=ceph_hosts['_admin'][0],
-                    reproduce_command=False,
-                )
-
-        # call the playbook to remove ssh_user_keys
-        extra_vars = {
-            "tripleo_cephadm_ssh_user": parsed_args.cephadm_ssh_user
-        }
-        if len(ceph_hosts['_admin']) > 0 or len(ceph_hosts['non_admin']) > 0:
-            with oooutils.TempDirs() as tmp:
-                oooutils.run_ansible_playbook(
-                    playbook='ceph-admin-user-disable.yml',
-                    inventory=inventory,
-                    workdir=tmp,
-                    playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
-                    verbosity=oooutils.playbook_verbosity(self=self),
-                    extra_vars=extra_vars,
-                    limit_hosts=",".join(ceph_hosts['_admin']
-                                         + ceph_hosts['non_admin']),
-                    reproduce_command=False,
-                )
-
-
-class OvercloudCephUserEnable(command.Command):
-
-    log = logging.getLogger(__name__ + ".OvercloudCephUserEnable")
-    auth_required = False
-
-    def get_parser(self, prog_name):
-        parser = super(OvercloudCephUserEnable, self).get_parser(prog_name)
-        parser.add_argument('ceph_spec',
-                            metavar='',
-                            help=_(
-                                "Path to an existing Ceph spec file "
-                                "which describes the Ceph cluster "
-                                "where the cephadm SSH user will be "
-                                "created (if necessary) and have their "
-                                "public and private keys installed. "
-                                "Spec file is necessary to determine "
-                                "which nodes to modify and if "
-                                "a public or private key is required."))
-        parser.add_argument('--fsid',
-                            metavar='', required=False,
-                            help=_("The FSID of the Ceph cluster to be "
-                                   "(re-)enabled. If the user disable "
-                                   "option has been used, the FSID may "
-                                   "be passed to the user enable option "
-                                   "so that cephadm will be re-enabled "
-                                   "for the Ceph cluster identified "
-                                   "by the FSID."))
-        parser.add_argument('--standalone', default=False,
-                            action='store_true',
-                            help=_("Use single host Ansible inventory. "
                                   "Used only for development or testing "
-                                   "environments."))
-        parser = arg_parse_common(parser)
-
-        return parser
-
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)" % parsed_args)
-
-        if parsed_args.fsid:
-            try:
-                uuid.UUID(parsed_args.fsid)
-            except ValueError:
-                raise oscexc.CommandError(
-                    "--fsid %s is not a valid UUID."
-                    % parsed_args.fsid)
-
-        ceph_spec = os.path.abspath(parsed_args.ceph_spec)
-
-        if not os.path.exists(ceph_spec):
-            raise oscexc.CommandError(
-                "Ceph spec file does not exist:"
-                " %s" % parsed_args.ceph_spec)
-
-        # use stack and working_dir to find inventory
-        if not parsed_args.working_dir:
-            working_dir = oooutils.get_default_working_dir(
-                parsed_args.stack)
-        else:
-            working_dir = os.path.abspath(parsed_args.working_dir)
-        oooutils.makedirs(working_dir)
-
-        if parsed_args.standalone:
-            inventory = oooutils.standalone_ceph_inventory(working_dir)
-        else:
-            inventory = os.path.join(working_dir,
-                                     constants.TRIPLEO_STATIC_INVENTORY)
-        if not os.path.exists(inventory):
-            raise oscexc.CommandError(
-                "Inventory file not found in working directory: "
-                "%s. It should have been created by "
-                "'openstack overcloud node provision'." 
- % inventory) - - # get ceph hosts from spec and make sure they're in the inventory - ceph_hosts = oooutils.get_host_groups_from_ceph_spec(ceph_spec) - ceph_hosts_in_inventory(ceph_hosts, ceph_spec, inventory) - - extra_vars = { - "tripleo_admin_user": parsed_args.cephadm_ssh_user, - "distribute_private_key": True - } - for limit_list in [ceph_hosts['_admin'], ceph_hosts['non_admin']]: - if len(limit_list) > 0: - if parsed_args.standalone: - # In standalone, Ansible groups allovercloud and undercloud - # denote the same single host. So just use undercloud to - # avoid LP 1979093. - limit_list = ['undercloud'] - else: - # Need to include the undercloud, where the keys are - # generated, in the subset of allovercloud hosts, - # denoted by limit_list. - limit_list.append('undercloud') - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='ceph-admin-user-playbook.yml', - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - limit_hosts=",".join(limit_list), - reproduce_command=False, - ) - # _admin hosts are done now so don't distribute private key - extra_vars["distribute_private_key"] = False - - if parsed_args.fsid: # if no FSID, then no ceph cluster to disable - # Call the playbook to toggle cephadm w/ enable - extra_vars = { - "tripleo_cephadm_fsid": parsed_args.fsid, - "tripleo_cephadm_backend": 'cephadm', - "tripleo_cephadm_action": 'enable' - } - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='disable_cephadm.yml', - inventory=inventory, - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - limit_hosts=ceph_hosts['_admin'][0], - reproduce_command=False, - ) - - -class OvercloudCephSpec(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudCephSpec") - auth_required = False - - def get_parser(self, prog_name): - parser = super(OvercloudCephSpec, self).get_parser(prog_name) - - parser.add_argument('baremetal_env', nargs='?', - metavar='', - help=_('Path to the environment file ' - 'output from "openstack ' - 'overcloud node provision". ' - 'This argument may be excluded ' - 'only if --standalone is used.')) - parser.add_argument('-o', '--output', required=True, - metavar='', - help=_('The path to the output cephadm spec ' - 'file to pass to the "openstack ' - 'overcloud ceph deploy --ceph-spec ' - '" command.')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt before overwriting an ' - 'existing output file ' - '(assume yes).')) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument( - '--working-dir', action='store', - help=_('The working directory for the deployment where all ' - 'input, output, and generated files will be stored.\n' - 'Defaults to "$HOME/overcloud-deploy/"')) - parser.add_argument('--roles-data', - help=_( - "Path to an alternative roles_data.yaml. " - "Used to decide which node gets which " - "Ceph mon, mgr, or osd service " - "based on the node's role in " - "."), - default=os.path.join( - constants.TRIPLEO_HEAT_TEMPLATES, - constants.OVERCLOUD_ROLES_FILE)) - parser.add_argument('--mon-ip', - help=_( - "IP address of the first Ceph monitor. 
" - "Only available with --standalone."), - default='') - parser.add_argument('--standalone', default=False, - action='store_true', - help=_("Create a spec file for a standalone " - "deployment. Used for single server " - "development or testing environments.")) - parser.add_argument('--osd-spec', - help=_( - "Path to an existing OSD spec file. " - "When the Ceph spec file is generated " - "its OSD spec defaults to " - "{data_devices: {all: true}} " - "for all service_type osd. " - "Use --osd-spec to override the " - "data_devices value inside the " - "Ceph spec file."), - default=None) - parser.add_argument('--crush-hierarchy', - help=_( - "Path to an existing crush hierarchy spec " - "file. "), - default=None) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - output_path = os.path.abspath(parsed_args.output) - overwrite = parsed_args.yes - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' % parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s. See the --yes parameter to " - "override this behavior. " % - parsed_args.output) - else: - overwrite = True - - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - working_dir = os.path.abspath(parsed_args.working_dir) - oooutils.makedirs(working_dir) - - if parsed_args.standalone: - inventory = oooutils.standalone_ceph_inventory(working_dir) - else: - inventory = os.path.join(working_dir, - constants.TRIPLEO_STATIC_INVENTORY) - if not os.path.exists(inventory): - raise oscexc.CommandError( - "Inventory file not found in working directory: " - "%s. It should have been created by " - "'openstack overcloud node provision'." - % inventory) - - # mandatory extra_vars are now set, add others conditionally - extra_vars = { - 'ceph_spec_path': output_path, - } - - # optional paths to pass to playbook - if parsed_args.standalone is None and \ - parsed_args.baremetal_env is None: - raise oscexc.CommandError( - "Either " - "or --standalone must be used.") - - if parsed_args.baremetal_env: - baremetal_env_path = os.path.abspath(parsed_args.baremetal_env) - if not os.path.exists(baremetal_env_path): - raise oscexc.CommandError( - "Baremetal environment file does not exist:" - " %s" % parsed_args.baremetal_env) - else: - extra_vars['baremetal_deployed_path'] = \ - os.path.abspath(parsed_args.baremetal_env) - - if parsed_args.roles_data: - if not os.path.exists(parsed_args.roles_data): - raise oscexc.CommandError( - "Roles Data file not found --roles-data %s." - % os.path.abspath(parsed_args.roles_data)) - else: - extra_vars['tripleo_roles_path'] = \ - os.path.abspath(parsed_args.roles_data) - - if parsed_args.mon_ip: - if not oooutils.is_valid_ip(parsed_args.mon_ip): - raise oscexc.CommandError( - "Invalid IP address '%s' passed to --mon-ip." - % parsed_args.mon_ip) - else: - if parsed_args.standalone: - extra_vars['tripleo_cephadm_first_mon_ip'] = \ - parsed_args.mon_ip - else: - raise oscexc.CommandError( - "Option --mon-ip may only be " - "used with --standalone") - - if parsed_args.osd_spec: - if not os.path.exists(parsed_args.osd_spec): - raise oscexc.CommandError( - "OSD Spec file not found --osd-spec %s." 
-                    % os.path.abspath(parsed_args.osd_spec))
-            else:
-                extra_vars['osd_spec_path'] = \
-                    os.path.abspath(parsed_args.osd_spec)
-
-        if parsed_args.crush_hierarchy:
-            if not os.path.exists(parsed_args.crush_hierarchy):
-                raise oscexc.CommandError(
-                    "Crush Hierarchy Spec file not found --crush-hierarchy %s."
-                    % os.path.abspath(parsed_args.crush_hierarchy))
-            else:
-                extra_vars['crush_hierarchy_path'] = \
-                    os.path.abspath(parsed_args.crush_hierarchy)
-
-        if parsed_args.standalone:
-            spec_playbook = 'cli-standalone-ceph-spec.yaml'
-            tags = ''
-        else:
-            spec_playbook = 'cli-deployed-ceph.yaml'
-            tags = 'ceph_spec'
-
-        with oooutils.TempDirs() as tmp:
-            oooutils.run_ansible_playbook(
-                playbook=spec_playbook,
-                inventory=inventory,
-                workdir=tmp,
-                playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
-                verbosity=oooutils.playbook_verbosity(self=self),
-                extra_vars=extra_vars,
-                reproduce_command=False,
-                tags=tags,
-                rotate_log=True,
-            )
diff --git a/tripleoclient/v2/overcloud_delete.py b/tripleoclient/v2/overcloud_delete.py
deleted file mode 100644
index 693c9dada..000000000
--- a/tripleoclient/v2/overcloud_delete.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2016 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import logging
-import os
-import yaml
-
-from osc_lib import exceptions as oscexc
-from osc_lib.i18n import _
-from osc_lib import utils as osc_utils
-
-from tripleoclient import command
-from tripleoclient import constants
-from tripleoclient import utils
-
-
-class DeleteOvercloud(command.Command):
-    """Delete overcloud stack and plan"""
-
-    log = logging.getLogger(__name__ + ".DeleteOvercloud")
-
-    def get_parser(self, prog_name):
-        parser = super(DeleteOvercloud, self).get_parser(prog_name)
-        parser.add_argument('stack', nargs='?',
-                            help=_('Name or ID of heat stack to delete '
-                                   '(default=Env: OVERCLOUD_STACK_NAME)'),
-                            default=osc_utils.env('OVERCLOUD_STACK_NAME'))
-        parser.add_argument('-y', '--yes',
-                            help=_('Skip yes/no prompt (assume yes).'),
-                            default=False,
-                            action="store_true")
-        parser.add_argument('-s', '--skip-ipa-cleanup',
-                            help=_('Skip removing overcloud hosts, services, '
-                                   'and DNS records from FreeIPA. This is '
-                                   'particularly relevant for deployments '
-                                   'using certificates from FreeIPA for TLS. '
-                                   'By default, overcloud hosts, services, '
-                                   'and DNS records will be removed from '
-                                   'FreeIPA before deleting the overcloud. '
-                                   'Using this option might require you to '
-                                   'manually clean up FreeIPA later.'),
-                            default=False,
-                            action="store_true")
-        parser.add_argument('-b', '--baremetal-deployment',
-                            metavar='',
-                            help=_('Configuration file describing the '
-                                   'baremetal deployment'))
-        parser.add_argument('--networks-file',
-                            metavar='',
-                            help=_('Configuration file describing the '
-                                   'network deployment to enable '
-                                   'unprovisioning of networks.'))
-        parser.add_argument('--network-ports',
-                            help=_('DEPRECATED! 
Network ports will always be ' - 'unprovisioned.\n' - 'Enable unprovisioning of network ports'), - default=False, - action="store_true") - parser.add_argument( - '--heat-type', - action='store', - default='pod', - choices=['pod', 'container', 'native'], - help=_('DEPRECATED: This option is ineffective and ' - 'ignored after deprecation. The type of Heat ' - 'process that was used to execute the deployment.\n' - 'pod (Default): Use an ephemeral Heat pod.\n' - 'container: Use an ephemeral Heat container.\n' - 'native: Use an ephemeral Heat process.') - ) - return parser - - def _validate_args(self, parsed_args): - if parsed_args.stack in (None, ''): - raise oscexc.CommandError("You must specify a stack name") - if parsed_args.networks_file: - networks_file_path = os.path.abspath(parsed_args.networks_file) - if not os.path.exists(networks_file_path): - raise oscexc.CommandError( - "Network configuration file does not exist:" - " {args}".format(args=parsed_args.networks_file)) - - def take_action(self, parsed_args): - self.log.debug("take_action({args})".format(args=parsed_args)) - - self._validate_args(parsed_args) - - if not parsed_args.yes: - confirm = utils.prompt_user_for_confirmation( - message=_("Are you sure you want to delete this overcloud " - "[y/N]? "), - logger=self.log) - if not confirm: - raise oscexc.CommandError("Action not confirmed, exiting.") - - if parsed_args.skip_ipa_cleanup: - playbooks = ["cli-overcloud-delete.yaml"] - else: - # Order is important, let's make sure we cleanup FreeIPA before we - # start removing infrastructure. - playbooks = ["cli-cleanup-ipa.yml", "cli-overcloud-delete.yaml"] - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbooks, - constants.ANSIBLE_INVENTORY.format(parsed_args.stack), - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - } - ) - - if parsed_args.baremetal_deployment: - with open(parsed_args.baremetal_deployment, 'r') as fp: - roles = yaml.safe_load(fp) - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-overcloud-node-unprovision.yaml', - workdir=tmp, - inventory='localhost,', - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "all": True, - "prompt": False, - "manage_network_ports": True, - } - ) - - if parsed_args.networks_file: - networks_file_path = os.path.abspath(parsed_args.networks_file) - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-overcloud-network-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars={ - "network_data_path": networks_file_path - } - ) - print("Success.") diff --git a/tripleoclient/v2/overcloud_network.py b/tripleoclient/v2/overcloud_network.py deleted file mode 100644 index 84b2bb0f4..000000000 --- a/tripleoclient/v2/overcloud_network.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import logging -import os - -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from osc_lib import utils - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils - - -class OvercloudNetworkExtract(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudNetworkExtract") - - def get_parser(self, prog_name): - parser = super(OvercloudNetworkExtract, self).get_parser(prog_name) - parser.add_argument('--stack', dest='stack', required=True, - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('-o', '--output', required=True, - metavar='', - help=_('The output file path describing the ' - 'network deployment')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt for existing files ' - '(assume yes).')) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - output_path = os.path.abspath(parsed_args.output) - - overwrite = parsed_args.yes - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' % parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s" % parsed_args.output) - else: - overwrite = True - - extra_vars = { - "stack_name": parsed_args.stack, - "output": output_path, - "overwrite": overwrite - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-network-extract.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - - -class OvercloudNetworkProvision(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudNetworkProvision") - - def get_parser(self, prog_name): - parser = super(OvercloudNetworkProvision, self).get_parser(prog_name) - - parser.add_argument('networks_file', - metavar='', - help=_('Configuration file describing the network ' - 'deployment.')) - parser.add_argument('-o', '--output', required=True, - metavar='', - help=_('The output network environment file ' - 'path.')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt for existing files ' - '(assume yes).')) - parser.add_argument('--templates', - help=_("The directory containing the Heat " - "templates to deploy"), - default=constants.TRIPLEO_HEAT_TEMPLATES) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack, when set the ' - 'networks file will be copied to the ' - 'working dir.'), - default=utils.env('OVERCLOUD_STACK_NAME', - default=None)) - parser.add_argument( - '--working-dir', action='store', - help=_('The working directory for the deployment where all ' - 'input, output, and generated files will be stored.\n' - 'Defaults to "$HOME/overcloud-deploy-"') - ) - - return parser - - def take_action(self, 
parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - networks_file_path = os.path.abspath(parsed_args.networks_file) - output_path = os.path.abspath(parsed_args.output) - - if not os.path.exists(networks_file_path): - raise oscexc.CommandError( - "Network configuration file does not exist:" - " %s" % parsed_args.networks_file) - - overwrite = parsed_args.yes - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' % parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s" % parsed_args.output) - else: - overwrite = True - - extra_vars = { - "network_data_path": networks_file_path, - "network_deployed_path": output_path, - "overwrite": overwrite, - "templates": parsed_args.templates, - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-network-provision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - - if parsed_args.stack: - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - working_dir = os.path.abspath(parsed_args.working_dir) - oooutils.makedirs(working_dir) - - oooutils.copy_to_wd(working_dir, networks_file_path, - parsed_args.stack, 'networks') - - -class OvercloudVirtualIPsExtract(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudVirtualIPsExtract") - - def get_parser(self, prog_name): - parser = super(OvercloudVirtualIPsExtract, self).get_parser(prog_name) - parser.add_argument('--stack', dest='stack', required=True, - help=_('Name of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('-o', '--output', required=True, - metavar='', - help=_('The output file path describing the ' - 'Virtual IP deployment')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt for existing files ' - '(assume yes).')) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - output_path = os.path.abspath(parsed_args.output) - - overwrite = parsed_args.yes - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' 
% parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s" % parsed_args.output) - else: - overwrite = True - - extra_vars = { - "stack_name": parsed_args.stack, - "output": output_path, - "overwrite": overwrite - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-network-vip-extract.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - - -class OvercloudVirtualIPsProvision(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudVirtualIPsProvision") - - def get_parser(self, prog_name): - parser = super(OvercloudVirtualIPsProvision, self).get_parser( - prog_name) - - parser.add_argument('vip_file', - metavar='', - help=_('Configuration file describing the network ' - 'Virtual IPs.')) - parser.add_argument('--stack', dest='stack', required=True, - help=_('Name of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('-o', '--output', required=True, - metavar='', - help=_('The output Virtual IP environment file ' - 'path.')) - parser.add_argument('-y', '--yes', default=False, action='store_true', - help=_('Skip yes/no prompt for existing files ' - '(assume yes).')) - parser.add_argument('--templates', - help=_("The directory containing the Heat " - "templates to deploy"), - default=constants.TRIPLEO_HEAT_TEMPLATES) - parser.add_argument( - '--working-dir', action='store', - help=_('The working directory for the deployment where all ' - 'input, output, and generated files will be stored.\n' - 'Defaults to "$HOME/overcloud-deploy-"') - ) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - working_dir = os.path.abspath(parsed_args.working_dir) - oooutils.makedirs(working_dir) - - vip_file_path = os.path.abspath(parsed_args.vip_file) - output_path = os.path.abspath(parsed_args.output) - - if not os.path.exists(vip_file_path): - raise oscexc.CommandError( - "Virtual IPs configuration file does not exist:" - " %s" % parsed_args.vip_file) - - overwrite = parsed_args.yes - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' 
% parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s" % parsed_args.output) - else: - overwrite = True - - extra_vars = { - "stack_name": parsed_args.stack, - "vip_data_path": vip_file_path, - "vip_deployed_path": output_path, - "overwrite": overwrite, - "templates": parsed_args.templates, - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-network-vip-provision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - - oooutils.copy_to_wd(working_dir, vip_file_path, parsed_args.stack, - 'vips') - - -class OvercloudNetworkUnprovision(command.Command): - - log = logging.getLogger(__name__ + ".OvercloudNetworkUnprovision") - - def get_parser(self, prog_name): - parser = super(OvercloudNetworkUnprovision, self).get_parser(prog_name) - - parser.add_argument('networks_file', - metavar='', - help=_('Configuration file describing the network ' - 'deployment.')) - parser.add_argument('-y', '--yes', - help=_('Skip yes/no prompt (assume yes).'), - default=False, - action="store_true") - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - networks_file_path = os.path.abspath(parsed_args.networks_file) - - if not parsed_args.yes: - confirm = oooutils.prompt_user_for_confirmation( - message=_("Are you sure you want to unprovision the networks " - "mentioned in file %s [y/N]? " % networks_file_path), - logger=self.log) - if not confirm: - raise oscexc.CommandError("Action not confirmed, exiting.") - - if not os.path.exists(networks_file_path): - raise oscexc.CommandError( - "Network configuration file does not exist:" - " %s" % parsed_args.networks_file) - - extra_vars = { - "network_data_path": networks_file_path, - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-network-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) diff --git a/tripleoclient/v2/overcloud_node.py b/tripleoclient/v2/overcloud_node.py deleted file mode 100644 index fe87fcc39..000000000 --- a/tripleoclient/v2/overcloud_node.py +++ /dev/null @@ -1,493 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import argparse -import collections -import json -import logging -import os -import sys - -from cliff.formatters import table -from osc_lib import exceptions as oscexc -from osc_lib.i18n import _ -from osc_lib import utils -import yaml - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils as oooutils -from tripleoclient.workflows import baremetal - -# NOTE(cloudnull): V1 imports, These classes will be removed as they're -# converted from mistral to ansible. 
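All of the network and VIP commands above repeat one overwrite guard before handing overwrite=True to their playbooks. A condensed sketch of that idiom follows; it is an illustration, not part of the original module, and it assumes oooutils.prompt_user_for_confirmation returns True only on an affirmative answer, as its usage above suggests:

import os

from osc_lib import exceptions as oscexc

from tripleoclient import utils as oooutils


def confirm_overwrite(output_path, assume_yes, log):
    # Refuse to clobber an existing file unless -y/--yes was passed or the
    # operator confirms at the prompt; callers then send overwrite=True to
    # the playbook as an extra var.
    if (os.path.exists(output_path) and not assume_yes
            and not oooutils.prompt_user_for_confirmation(
                'Overwrite existing file %s [y/N]?' % output_path, log)):
        raise oscexc.CommandError(
            "Will not overwrite existing file: %s" % output_path)
    return True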
-from tripleoclient.v1.overcloud_node import CleanNode # noqa -from tripleoclient.v1.overcloud_node import ConfigureNode # noqa -from tripleoclient.v1.overcloud_node import DeleteNode # noqa -from tripleoclient.v1.overcloud_node import DiscoverNode # noqa -from tripleoclient.v1.overcloud_node import ProvideNode # noqa -from tripleoclient.workflows import tripleo_baremetal as tb - - -class ImportNode(command.Command): - """Import baremetal nodes from a JSON, YAML or CSV file. - - The node status will be set to 'manageable' by default. - """ - - log = logging.getLogger(__name__ + ".ImportNode") - - def get_parser(self, prog_name): - parser = super(ImportNode, self).get_parser(prog_name) - parser.add_argument('--introspect', - action='store_true', - help=_('Introspect the imported nodes')) - parser.add_argument('--run-validations', action='store_true', - default=False, - help=_('Run the pre-deployment validations. These' - ' external validations are from the' - ' TripleO Validations project.')) - parser.add_argument('--validate-only', action='store_true', - default=False, - help=_('Validate the env_file and then exit ' - 'without actually importing the nodes.')) - parser.add_argument('--provide', - action='store_true', - help=_('Provide (make available) the nodes')) - parser.add_argument('--no-deploy-image', action='store_true', - help=_('Skip setting the deploy kernel and ' - 'ramdisk.')) - parser.add_argument('--instance-boot-option', - choices=['local', 'netboot'], default=None, - help=_('Whether to set instances for booting from' - ' local hard drive (local) or network ' - ' (netboot)')) - parser.add_argument('--boot-mode', - choices=['uefi', 'bios'], default=None, - help=_('Whether to set the boot mode to UEFI ' - '(uefi) or legacy BIOS (bios)')) - parser.add_argument("--http-boot", - default=os.environ.get( - 'HTTP_BOOT', - constants.IRONIC_HTTP_BOOT_BIND_MOUNT), - help=_("Root directory for the " - " ironic-python-agent image")) - parser.add_argument('--concurrency', type=int, - default=20, - help=_('Maximum number of nodes to introspect at ' - 'once.')) - parser.add_argument('--verbosity', type=int, - default=1, - help=_('Print debug logs during execution')) - parser.add_argument('env_file', type=argparse.FileType('r')) - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - nodes_config = oooutils.parse_env_file(parsed_args.env_file) - parsed_args.env_file.close() - - if parsed_args.validate_only: - return baremetal.validate_nodes(self.app.client_manager, - nodes_json=nodes_config) - - # Look for *specific* deploy images and update the node data if - # one is found. 
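For context, the nodes file parsed above (parsed_args.env_file) yields a plain dictionary. A hypothetical example of what oooutils.parse_env_file() might return, using the instackenv-style registration keys documented for TripleO; the values are illustrative only:

# Hypothetical nodes_config; only the key names are meaningful here.
nodes_config = {
    "nodes": [
        {
            "name": "controller-0",
            "pm_type": "ipmi",            # power management driver
            "pm_addr": "192.168.24.100",  # BMC address
            "pm_user": "admin",
            "pm_password": "secret",
            "ports": [{"address": "aa:bb:cc:dd:ee:01"}],
        },
    ]
}

Unless --no-deploy-image is used, the step below then fills in deploy kernel and ramdisk references before the nodes are registered with Ironic.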
- if not parsed_args.no_deploy_image: - oooutils.update_nodes_deploy_data(nodes_config, - http_boot=parsed_args.http_boot) - nodes = baremetal.register_or_update( - self.app.client_manager, - nodes_json=nodes_config, - instance_boot_option=parsed_args.instance_boot_option, - boot_mode=parsed_args.boot_mode - ) - - nodes_uuids = [node.uuid for node in nodes] - - if parsed_args.introspect: - extra_vars = { - "node_uuids": nodes_uuids, - "run_validations": parsed_args.run_validations, - "concurrency": parsed_args.concurrency, - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-baremetal-introspect.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars - ) - - if parsed_args.provide: - provide = tb.TripleoProvide(verbosity=parsed_args.verbosity) - provide.provide(nodes=nodes_uuids) - - -class IntrospectNode(command.Command): - """Introspect specified nodes or all nodes in 'manageable' state.""" - - log = logging.getLogger(__name__ + ".IntrospectNode") - - def get_parser(self, prog_name): - parser = super(IntrospectNode, self).get_parser(prog_name) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('node_uuids', - nargs="*", - metavar="", - default=[], - help=_('Baremetal Node UUIDs for the node(s) to be ' - 'introspected')) - group.add_argument("--all-manageable", - action='store_true', - help=_("Introspect all nodes currently in " - "'manageable' state")) - parser.add_argument('--provide', - action='store_true', - help=_('Provide (make available) the nodes once ' - 'introspected')) - parser.add_argument('--run-validations', action='store_true', - default=False, - help=_('Run the pre-deployment validations. 
These '
-                                   'external validations are from the TripleO '
-                                   'Validations project.'))
-        parser.add_argument('--concurrency', type=int,
-                            default=20,
-                            help=_('Maximum number of nodes to introspect at '
-                                   'once.'))
-        parser.add_argument('--node-timeout', type=int,
-                            default=1200,
-                            help=_('Maximum timeout for node introspection.'))
-        parser.add_argument('--max-retries', type=int,
-                            default=1,
-                            help=_('Maximum introspection retries.'))
-        parser.add_argument('--retry-timeout', type=int,
-                            default=120,
-                            help=_('Maximum timeout between introspection '
-                                   'retries'))
-        parser.add_argument('--verbosity', type=int,
-                            default=1,
-                            help=_('Print debug logs during execution'))
-        return parser
-
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)" % parsed_args)
-
-        if parsed_args.all_manageable:
-            baremetal.introspect_manageable_nodes(
-                self.app.client_manager,
-                run_validations=parsed_args.run_validations,
-                concurrency=parsed_args.concurrency,
-                node_timeout=parsed_args.node_timeout,
-                max_retries=parsed_args.max_retries,
-                retry_timeout=parsed_args.retry_timeout,
-                verbosity=oooutils.playbook_verbosity(self=self)
-            )
-        else:
-            baremetal.introspect(
-                self.app.client_manager,
-                node_uuids=parsed_args.node_uuids,
-                run_validations=parsed_args.run_validations,
-                concurrency=parsed_args.concurrency,
-                node_timeout=parsed_args.node_timeout,
-                max_retries=parsed_args.max_retries,
-                retry_timeout=parsed_args.retry_timeout,
-                verbosity=oooutils.playbook_verbosity(self=self)
-            )
-
-        # NOTE(cloudnull): This is using the old provide function, in a future
-        #                  release this may be ported to a standalone playbook
-        if parsed_args.provide:
-            provide = tb.TripleoProvide(verbosity=parsed_args.verbosity)
-            if parsed_args.node_uuids:
-                provide.provide(
-                    nodes=parsed_args.node_uuids,
-                )
-            else:
-                provide.provide_manageable_nodes()
-
-
-class ProvisionNode(command.Command):
-    """Provision new nodes using Ironic."""
-
-    log = logging.getLogger(__name__ + ".ProvisionNode")
-
-    def get_parser(self, prog_name):
-        parser = super(ProvisionNode, self).get_parser(prog_name)
-        parser.add_argument('input',
-                            metavar='',
-                            help=_('Configuration file describing the '
-                                   'baremetal deployment'))
-        parser.add_argument('-o', '--output',
-                            default='baremetal_environment.yaml',
-                            help=_('The output environment file path'))
-        parser.add_argument('-y', '--yes', default=False, action='store_true',
-                            help=_('Skip yes/no prompt for existing files '
-                                   '(assume yes).'))
-        parser.add_argument('--stack', dest='stack',
-                            help=_('Name or ID of heat stack '
-                                   '(default=Env: OVERCLOUD_STACK_NAME)'),
-                            default=utils.env('OVERCLOUD_STACK_NAME',
-                                              default='overcloud'))
-        parser.add_argument('--overcloud-ssh-user',
-                            default='tripleo-admin',
-                            help=_('User for SSH access to newly deployed '
-                                   'nodes'))
-        parser.add_argument('--overcloud-ssh-key',
-                            default=None,
-                            help=_('Key path for ssh access to '
-                                   'overcloud nodes. When undefined the key '
-                                   'will be autodetected.'))
-        parser.add_argument('--concurrency', type=int,
-                            default=20,
-                            help=_('Maximum number of nodes to provision at '
-                                   'once. (default=20)'))
-        parser.add_argument('--timeout', type=int,
-                            default=3600,
-                            help=_('Number of seconds to wait for the node '
-                                   'provision to complete. (default=3600)'))
-        parser.add_argument('--network-ports',
-                            help=_('DEPRECATED! 
Network ports will always be ' - 'provisioned.\n' - 'Enable provisioning of network ports'), - default=False, - action="store_true") - parser.add_argument('--network-config', - help=_('Apply network config to provisioned ' - 'nodes. (Implies "--network-ports")'), - default=False, - action="store_true") - parser.add_argument('--templates', - help=_("The directory containing the Heat " - "templates to deploy"), - default=constants.TRIPLEO_HEAT_TEMPLATES) - parser.add_argument( - '--working-dir', action='store', - help=_('The working directory for the deployment where all ' - 'input, output, and generated files will be stored.\n' - 'Defaults to "$HOME/overcloud-deploy-"') - ) - - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if parsed_args.network_ports: - self.log.warning('DEPRECATED option "--network-ports" detected. ' - 'This option is no longer used, network ports ' - 'are always managed.') - - output_path = os.path.abspath(parsed_args.output) - - overwrite = parsed_args.yes - if (os.path.exists(output_path) and not overwrite - and not oooutils.prompt_user_for_confirmation( - 'Overwrite existing file %s [y/N]?' % parsed_args.output, - self.log)): - raise oscexc.CommandError("Will not overwrite existing file:" - " %s" % parsed_args.output) - else: - overwrite = True - - if not parsed_args.working_dir: - working_dir = oooutils.get_default_working_dir( - parsed_args.stack) - else: - working_dir = os.path.abspath(parsed_args.working_dir) - oooutils.makedirs(working_dir) - - roles_file_path = os.path.abspath(parsed_args.input) - roles_file_dir = os.path.dirname(roles_file_path) - with open(roles_file_path, 'r') as fp: - roles = yaml.safe_load(fp) - - oooutils.validate_roles_playbooks(roles_file_dir, roles) - - key = self.get_key_pair(parsed_args) - with open('{}.pub'.format(key), 'rt') as fp: - ssh_key = fp.read() - - extra_vars = { - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "baremetal_deployed_path": output_path, - "ssh_public_keys": ssh_key, - "ssh_private_key_file": key, - "ssh_user_name": parsed_args.overcloud_ssh_user, - "node_timeout": parsed_args.timeout, - "concurrency": parsed_args.concurrency, - "manage_network_ports": True, - "configure_networking": parsed_args.network_config, - "working_dir": working_dir, - "templates": parsed_args.templates, - "overwrite": overwrite, - } - - with oooutils.TempDirs() as tmp: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-node-provision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars=extra_vars, - ) - oooutils.run_role_playbooks(self, working_dir, roles_file_dir, - roles, parsed_args.network_config) - - oooutils.copy_to_wd(working_dir, roles_file_path, parsed_args.stack, - 'baremetal') - - print('Nodes deployed successfully, add %s to your deployment ' - 'environment' % parsed_args.output) - - -class UnprovisionNode(command.Command): - """Unprovisions nodes using Ironic.""" - - log = logging.getLogger(__name__ + ".UnprovisionNode") - - def get_parser(self, prog_name): - parser = super(UnprovisionNode, self).get_parser(prog_name) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument('--all', - help=_('Unprovision every instance in the ' - 'deployment'), - default=False, - 
action="store_true") - parser.add_argument('-y', '--yes', - help=_('Skip yes/no prompt (assume yes)'), - default=False, - action="store_true") - parser.add_argument('input', - metavar='', - help=_('Configuration file describing the ' - 'baremetal deployment')) - parser.add_argument('--network-ports', - help=_('DEPRECATED! Network ports will always be ' - 'unprovisioned.\n' - 'Enable unprovisioning of network ports'), - default=False, - action="store_true") - return parser - - def take_action(self, parsed_args): - self.log.debug("take_action(%s)" % parsed_args) - - if parsed_args.network_ports: - self.log.warning('DEPRECATED option "--network-ports" detected. ' - 'This option is no longer used, network ports ' - 'are always managed.') - - with open(parsed_args.input, 'r') as fp: - roles = yaml.safe_load(fp) - - with oooutils.TempDirs() as tmp: - unprovision_confirm = os.path.join(tmp, 'unprovision_confirm.json') - - if not parsed_args.yes: - oooutils.run_ansible_playbook( - playbook='cli-overcloud-node-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "all": parsed_args.all, - "prompt": True, - "unprovision_confirm": unprovision_confirm, - "manage_network_ports": True, - } - ) - with open(unprovision_confirm) as f: - to_unprovision = json.load(f) - - # (TODO: slagle) unprovision_confirm was previously a list, - # but was switched to a dict so that network ports for - # pre_provisioned nodes can also be confirmed for - # unprovisioning. Check the data structure for backwards - # compatibility, When the tripleo-ansible patch is merged, - # this check can be removed. - if isinstance(to_unprovision, dict): - instances = to_unprovision.get('instances') - pre_provisioned = to_unprovision.get('pre_provisioned') - else: - instances = to_unprovision - pre_provisioned = None - - print() - if not (instances or pre_provisioned): - print('Nothing to unprovision, exiting') - return - print("The following nodes will be unprovisioned:") - self._print_nodes(instances) - print() - if pre_provisioned: - print("The following pre-provisioned nodes will " - "have network ports unprovisioned:") - self._print_nodes(pre_provisioned) - print() - - confirm = oooutils.prompt_user_for_confirmation( - message=_("Are you sure you want to unprovision these %s " - "nodes and ports [y/N]? 
") % parsed_args.stack, - logger=self.log) - if not confirm: - raise oscexc.CommandError("Action not confirmed, exiting.") - - oooutils.run_ansible_playbook( - playbook='cli-overcloud-node-unprovision.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=oooutils.playbook_verbosity(self=self), - extra_vars={ - "stack_name": parsed_args.stack, - "baremetal_deployment": roles, - "all": parsed_args.all, - "prompt": False, - "manage_network_ports": True, - } - ) - - print('Unprovision complete') - - def _print_nodes(self, nodes): - TableArgs = collections.namedtuple( - 'TableArgs', 'print_empty max_width fit_width') - args = TableArgs(print_empty=True, max_width=-1, fit_width=True) - nodes_data = [(i.get('hostname', ''), - i.get('name', ''), - i.get('id', '')) for i in nodes] - - sys.stdout.write('\n') - formatter = table.TableFormatter() - formatter.emit_list( - column_names=['hostname', 'name', 'id'], - data=nodes_data, - stdout=sys.stdout, - parsed_args=args - ) diff --git a/tripleoclient/v2/overcloud_support.py b/tripleoclient/v2/overcloud_support.py deleted file mode 100644 index 7072c0399..000000000 --- a/tripleoclient/v2/overcloud_support.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import logging - -from osc_lib.i18n import _ - -from tripleoclient import command -from tripleoclient import constants -from tripleoclient import utils - - -class ReportExecute(command.Command): - """Run sosreport on selected servers.""" - - log = logging.getLogger(__name__ + ".ReportExecute") - - def get_parser(self, prog_name): - parser = super(ReportExecute, self).get_parser(prog_name) - parser.add_argument('server_name', - help=_('Server name, group name, or partial name' - ' to match. 
For example "Controller" will' - ' match all controllers for an' - ' environment.')) - parser.add_argument('--stack', - help=_("Stack name to use for log collection."), - default='overcloud') - # Deprecated in U - parser.add_argument('-c', - '--container', - dest='container', - default=None, - help=_('This option no-longer has any effect.')) - parser.add_argument('-o', - '--output', - dest='destination', - default='/var/lib/tripleo/support', - help=_('Output directory for the report')) - # Deprecated in U - parser.add_argument('--skip-container-delete', - dest='skip_delete', - default=False, - help=_('This option no-longer has any effect.'), - action='store_true') - # Deprecated in U - parser.add_argument('-t', - '--timeout', - dest='timeout', - type=int, - default=None, - help=_('This option no-longer has any effect.')) - # Deprecated in U - parser.add_argument('-n', - '--concurrency', - dest='concurrency', - type=int, - default=None, - help=_('This option no-longer has any effect.')) - # Deprecated in U - parser.add_argument('--collect-only', - dest='collect_only', - help=_('This option no-longer has any effect.'), - default=False, - action='store_true') - # Deprecated in U - parser.add_argument('--download-only', - dest='download_only', - help=_('This option no-longer has any effect.'), - default=False, - action='store_true') - return parser - - def take_action(self, parsed_args): - self.log.debug('take_action({})'.format(parsed_args)) - - extra_vars = { - 'server_name': parsed_args.server_name, - 'sos_destination': parsed_args.destination, - } - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-support-collect-logs.yaml', - inventory=constants.ANSIBLE_INVENTORY.format( - parsed_args.stack - ), - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=utils.playbook_verbosity(self=self), - extra_vars=extra_vars - ) diff --git a/tripleoclient/v2/tripleo_container_image.py b/tripleoclient/v2/tripleo_container_image.py deleted file mode 100644 index 46d18f34f..000000000 --- a/tripleoclient/v2/tripleo_container_image.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import collections
-import os
-import re
-import uuid
-import yaml
-
-from osc_lib.i18n import _
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tripleo_common.exception import NotFound
-from tripleo_common.image.builder import buildah
-
-from tripleoclient import command
-from tripleoclient import constants
-from tripleoclient import utils
-
-
-CONF = cfg.CONF
-
-DEFAULT_AUTHFILE = "{}/containers/auth.json".format(
-    os.environ.get("XDG_RUNTIME_DIR", os.path.expanduser("~"))
-)
-DEFAULT_ENV_AUTHFILE = os.environ.get("REGISTRY_AUTH_FILE", DEFAULT_AUTHFILE)
-DEFAULT_CONFIG = "tripleo_containers.yaml"
-DEFAULT_TCIB_CONFIG_BASE = "tcib"
-SUPPORTED_RHEL_MODULES = ['container-tools', 'mariadb', 'redis', 'virt']
-
-
-class Build(command.Command):
-    """Build tripleo container images with tripleo-ansible."""
-
-    log = logging.getLogger(__name__ + ".Build")
-
-    auth_required = False
-    identified_images = list()
-    image_parents = collections.OrderedDict()
-    image_paths = dict()
-
-    def get_parser(self, prog_name):
-        parser = super(Build, self).get_parser(prog_name)
-        parser.add_argument(
-            "--authfile",
-            dest="authfile",
-            metavar="",
-            default=DEFAULT_ENV_AUTHFILE,
-            help=_(
-                "Path of the authentication file. Use REGISTRY_AUTH_FILE "
-                "environment variable to override. (default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--base",
-            dest="base",
-            metavar="",
-            default="centos:stream9",
-            help=_(
-                "Base image name, with optional version. Can be 'centos:8', "
-                "base name image will be 'centos' but 'centos:8' will be "
-                "pulled to build the base image. (default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--config-file",
-            dest="config_file",
-            metavar="",
-            default=DEFAULT_CONFIG,
-            help=_(
-                "YAML config file specifying the images to build. "
-                "(default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--config-path",
-            dest="config_path",
-            metavar="",
-            default=constants.CONTAINER_IMAGES_BASE_PATH,
-            help=_(
-                "Base configuration path. This is the base path for all "
-                "container-image files. The defined containers must reside "
-                "within a 'tcib' folder that is in this path. If this option "
-                "is set, the default path for will be modified. "
-                "(default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--distro",
-            dest="distro",
-            default="centos",
-            metavar="",
-            help=_(
-                "Distro name which sets tcib_distro, if undefined the "
-                "system will build using the host distro. "
-                "(default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--release",
-            dest="release",
-            default="9",
-            metavar="",
-            help=_(
-                "Distro major release version which sets tcib_release. "
-                "(default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--tcib-extras",
-            dest="tcib_extras",
-            default=None,
-            metavar="",
-            action='append',
-            help=_(
-                "TCIB extra variables you want to pass. They can be later "
-                "used within TCIB files as conditionals. Can be passed "
-                "multiple times. (default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--exclude",
-            dest="excludes",
-            metavar="",
-            default=[],
-            action="append",
-            help=_(
-                "Name of one container to match against the list of "
-                "containers to be built to skip. Should be specified "
-                "multiple times when skipping multiple containers. "
-                "(default: %(default)s)"
-            ),
-        )
-        parser.add_argument(
-            "--extra-config",
-            dest="extra_config",
-            metavar="",
-            help=_(
-                "Apply additional options from a given configuration YAML "
-                "file. This will apply to all containers built. 
" - "(default: %(default)s)" - ), - ) - parser.add_argument( - "--namespace", - dest="namespace", - metavar="", - default=constants.DEFAULT_CONTAINER_NAMESPACE, - help=_("Container registry namespace (default: %(default)s)"), - ) - parser.add_argument( - "--registry", - dest="registry", - metavar="", - default="localhost", - help=_("Container registry URL (default: %(default)s)"), - ) - parser.add_argument( - "--skip-build", - dest="skip_build", - default=False, - action="store_true", - help=_( - "Skip or not the build of the images (default: %(default)s)" - ), - ) - parser.add_argument( - "--tag", - dest="tag", - metavar="", - default="latest", - help=_("Image tag (default: %(default)s)"), - ) - parser.add_argument( - "--prefix", - dest="prefix", - metavar="", - default="openstack", - help=_("Image prefix. (default: %(default)s)"), - ) - parser.add_argument( - "--push", - dest="push", - default=False, - action="store_true", - help=_( - "Enable image push to a given registry. " - "(default: %(default)s)" - ), - ) - parser.add_argument( - "--label", - dest="labels", - metavar="", - default=[], - action="append", - help=_( - "Add labels to the containers. This option can be " - "specified multiple times. Each label is a key=value " - "pair." - ), - ) - parser.add_argument( - "--volume", - dest="volumes", - metavar="", - default=[ - "/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z", - ], - action="append", - help=_( - "Container bind mount used when building the image. Should " - "be specified multiple times if multiple volumes." - "(default: %(default)s)" - ), - ) - parser.add_argument( - "--repo-dir", - dest="repo_dir", - metavar="", - default="/etc/yum.repos.d", - help=_( - "Define a custom directory containing the repo files. This is " - "useful when building containers from a different OS release." - ), - ) - parser.add_argument( - "--work-dir", - dest="work_dir", - metavar="", - default="/tmp/container-builds", - help=_( - "TripleO container builds directory, storing configs and " - "logs for each image and its dependencies. " - "(default: %(default)s)" - ), - ) - parser.add_argument( - "--rhel-modules", - dest="rhel_modules", - metavar="", - default=None, - help=_("A comma separated list of RHEL modules to enable with " - "their version. 
Example: 'mariadb:10.3,virt:8.3'."),
-        )
-        parser.add_argument(
-            "--build-timeout",
-            dest="build_timeout",
-            metavar="",
-            default=None,
-            type=int,
-            help=_("Build timeout in seconds.")
-        )
-        return parser
-
-    def imagename_to_regex(self, imagename):
-        if not imagename:
-            return
-        # remove any namespace from the start
-        imagename = imagename.split("/")[-1]
-
-        # remove any tag from the end
-        imagename = imagename.split(":")[0]
-
-        # remove supported base names from the start
-        imagename = re.sub(r"^(openstack|centos|rhel|ubi8)-", "", imagename)
-
-        # remove install_type from the start
-        imagename = re.sub(r"^(binary|source|rdo|rhos)-", "", imagename)
-
-        # what results should be acceptable as a regex to build one image
-        return imagename
-
-    def build_tree(self, path, tree=""):
-        content = []
-        path = os.path.join(path, tree)
-
-        (cur_path, children, _) = next(os.walk(path))
-        for child in children:
-            val = self.build_tree(cur_path, child)
-            if val:
-                content.append(val)
-
-        if content:
-            if tree:
-                return {tree: content}
-            return content
-
-        return tree
-
-    def index_images(self, path):
-        for root, __, files in os.walk(path):
-            if [i for i in files if i.endswith(("yaml", "yml"))]:
-                self.identified_images.append(os.path.basename(root))
-
-    def find_image(self, name, path, base_image):
-        """Find an image and load its config.
-
-        This will traverse a directory structure looking for an image
-        directory; when found, all configs will be loaded lexically and
-        returned as a single Dictionary.
-
-        :param name: Container name.
-        :type name: String.
-        :param path: Directory path to traverse.
-        :type path: String.
-        :param base_image: Name of base container image.
-        :type base_image: String.
-        :returns: Dictionary
-        """
-
-        container_vars = dict()
-        for root, dirs, files in os.walk(path):
-            if os.path.basename(root) == name:
-                for file_name in sorted(files):
-                    if file_name.endswith(("yaml", "yml")):
-                        _option_file = os.path.join(root, file_name)
-                        self.log.debug(
-                            "reading option file: {}".format(_option_file)
-                        )
-                        with open(_option_file) as f:
-                            _options = yaml.safe_load(f)
-                        if _options:
-                            container_vars.update(_options)
-
-                base_dir = root
-                while base_dir != os.sep:
-                    base_dir = os.path.dirname(base_dir)
-                    base_files = [
-                        i
-                        for i in os.listdir(base_dir)
-                        if i.endswith(("yaml", "yml"))
-                    ]
-                    if base_files:
-                        self.image_parents[name] = os.path.basename(
-                            base_dir
-                        )
-                        break
-                else:
-                    self.image_parents[name] = base_image
-        else:
-            return container_vars
-
-    def rectify_excludes(self, images_to_prepare):
-        """Build a dynamic exclude list.
-
-        Using the identified images, we check against our expected images
-        to build a dynamic exclusion list which will extend the user provided
-        excludes.
-
-        :param images_to_prepare: List of expected images.
-        :type images_to_prepare: List.
-        :returns: List
-        """
-
-        excludes = list()
-        for image in self.identified_images:
-            if image not in images_to_prepare:
-                excludes.append(image)
-        else:
-            return excludes
-
-    def make_dir_tree(self, tree, work_dir):
-        """Walk the tree then create and catalog all directories.
-
-        As the tree is walked, containers are identified, directories are
-        created and the Containerfile image relationship is recorded for later
-        lookup.
-
-        :param tree: List of expected images.
-        :type tree: List.
-        :param work_dir: Work directory path.
-        :type work_dir: String. 
- """ - - if isinstance(tree, list): - for item in tree: - self.make_dir_tree(tree=item, work_dir=work_dir) - elif isinstance(tree, dict): - for key, value in tree.items(): - self.image_paths[key] = os.path.join(work_dir, key) - utils.makedirs(dir_path=self.image_paths[key]) - self.make_dir_tree(tree=value, work_dir=self.image_paths[key]) - elif isinstance(tree, str): - self.image_paths[tree] = os.path.join(work_dir, tree) - utils.makedirs(dir_path=self.image_paths[tree]) - - def process_images(self, expected_images, parsed_args, image_configs): - """Process all of expected images and ensure we have valid config. - - :param expected_images: List of expected images. - :type expected_images: List. - :param parsed_args: Parsed arguments. - :type parsed_args: Object. - :param image_configs: Hash of pre-processed images. - :type image_configs: Dict. - :returns List: - """ - - image_configs = collections.OrderedDict() - config_path_base = os.path.basename( - os.path.abspath(parsed_args.config_path)) - for image in expected_images: - if (image != config_path_base and image not in image_configs): - self.log.debug("processing image config %s", image) - image_config = self.find_image( - image, - self.tcib_config_path, - parsed_args.base - ) - if not image_config: - self.log.error( - "Image processing failure: {}".format(image) - ) - raise RuntimeError( - "Container image specified, but no" - " config was provided. Image: {}".format(image) - ) - image_configs[image] = image_config - - return image_configs - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - self.config_file = os.path.expanduser(parsed_args.config_file) - self.config_path = os.path.expanduser(parsed_args.config_path) - authfile = os.path.expanduser(parsed_args.authfile) - if os.path.exists(authfile): - os.environ["REGISTRY_AUTH_FILE"] = authfile - else: - try: - del os.environ["REGISTRY_AUTH_FILE"] - except KeyError: - pass - self.tcib_config_path = os.path.join( - self.config_path, DEFAULT_TCIB_CONFIG_BASE - ) - - if not os.path.isdir(self.tcib_config_path): - raise IOError( - "Configuration directory {} was not found.".format( - self.tcib_config_path - ) - ) - - if not os.path.isfile(self.config_file): - self.config_file = os.path.join( - os.path.dirname(self.tcib_config_path), - parsed_args.config_file, - ) - - self.log.debug("take_action({})".format(parsed_args)) - excludes = parsed_args.excludes - images_to_prepare = list() - - # Generate an unique work directory so we can keep configs and logs - # each time we run the command; they'll be stored in work_dir. - work_dir = os.path.join(parsed_args.work_dir, str(uuid.uuid4())) - - # Build a tree of images which have a config; this tree will allow - # to concurrently build images which share a common base. 
- if not os.path.isdir(self.tcib_config_path): - raise NotFound( - "The path {path} does not exist".format( - path=self.tcib_config_path - ) - ) - images_tree = self.build_tree(path=self.tcib_config_path) - - tree_file = "{tree_file}".format( - tree_file=os.path.join(work_dir, "build-tree.yaml") - ) - utils.makedirs(os.path.dirname(tree_file)) - with open(tree_file, "w") as f: - yaml.safe_dump( - images_tree, f, default_flow_style=False, width=4096 - ) - - self.index_images(path=self.tcib_config_path) - self.make_dir_tree(tree=images_tree, work_dir=work_dir) - - # Make sure the unique work directory exists - if not os.path.exists(work_dir): - self.log.debug( - "Creating container builds workspace in: {}".format(work_dir) - ) - os.makedirs(work_dir) - - if os.path.isfile(self.config_file): - self.log.info( - "Configuration file found: {}".format(self.config_file) - ) - with open(self.config_file, "r") as f: - containers_yaml = yaml.safe_load(f) - - for c in containers_yaml["container_images"]: - entry = dict(c) - if not entry.get("image_source", "") == "tripleo": - continue - image = self.imagename_to_regex(entry.get("imagename")) - if image and image not in excludes: - images_to_prepare.append(image) - else: - self.log.warning( - "Configuration file not found: {}".format(self.config_file) - ) - self.log.warning( - "All identified images will be prepared: {}".format( - self.identified_images - ) - ) - images_to_prepare.extend(self.identified_images) - - # NOTE(cloudnull): Ensure all dependent images are in the build - # tree. Once an image has been added to the - # prepare array, we walk it back and ensure - # dependencies are also part of the build - # process. - image_configs = collections.OrderedDict() # hash - image_configs.update( - self.process_images( - expected_images=images_to_prepare, - parsed_args=parsed_args, - image_configs=image_configs - ) - ) - _parents = self.process_images( - expected_images=list(self.image_parents.values()), - parsed_args=parsed_args, - image_configs=image_configs - ) - for key, value in _parents.items(): - image_configs[key] = value - image_configs.move_to_end(key, last=False) - images_to_prepare.insert(0, key) - - if "os" in image_configs: # Second image prepared if found - image_configs.move_to_end("os", last=False) - - if "base" in image_configs: # First image prepared if found - image_configs.move_to_end("base", last=False) - - self.log.debug( - "Images being prepared: {}".format( - list(image_configs.keys()) - ) - ) - - tcib_inventory = {"all": {"hosts": {}}} - tcib_inventory_hosts = tcib_inventory["all"]["hosts"] - for image, image_config in image_configs.items(): - self.log.debug("processing image config {}".format(image)) - if image == "base": - image_name = image_from = parsed_args.base - else: - image_name = self.image_parents.get(image, image) - image_from = ( - "{registry}/{namespace}" - "/{prefix}-{image}:{tag}".format( - registry=parsed_args.registry, - namespace=parsed_args.namespace, - prefix=parsed_args.prefix, - image=image_name, - tag=parsed_args.tag, - ) - ) - - image_parsed_name = self.imagename_to_regex(imagename=image) - - # For each image we will generate Dockerfiles in the work_dir - # following a specific directory structure per image - image_config.update( - { - "workdir": self.image_paths.get(image, work_dir), - "tcib_distro": parsed_args.distro, - "tcib_release": parsed_args.release, - "tcib_path": self.image_paths.get(image, work_dir), - "tcib_meta": {"name":
image_parsed_name}, - "ansible_connection": "local", - } - ) - if parsed_args.tcib_extras: - for extras in parsed_args.tcib_extras: - key, value = extras.split('=') - # Enforce format in order to get some consistency - if not key.startswith('tcib_'): - raise ValueError('Wrong key format {key}. ' - 'We expect "tcib_" prefix, such as ' - 'tcib_{key}'.format(key=key)) - image_config[key] = value - - if parsed_args.rhel_modules: - rhel_modules = {} - for module in parsed_args.rhel_modules.split(','): - try: - name, version = module.split(':', 1) - except Exception: - raise ValueError('Wrong format for --rhel-modules, ' - 'must be a comma-separated list of ' - 'name:version pairs') - if name not in SUPPORTED_RHEL_MODULES: - raise ValueError('{} is not part of supported modules' - ' {}'.format(name, - SUPPORTED_RHEL_MODULES)) - rhel_modules.update({name: version}) - image_config['tcib_rhel_modules'] = rhel_modules - - if parsed_args.labels: - _desc = "OpenStack Platform {}".format(image_parsed_name) - label_data = image_config['tcib_labels'] = { - "tcib_managed": True, - "maintainer": "OpenStack TripleO Team", - "description": _desc, - "summary": _desc, - "io.k8s.display-name": _desc, - } - for item in parsed_args.labels: - key, value = item.split("=", 1) - label_data[key] = value % dict( - registry=parsed_args.registry, - namespace=parsed_args.namespace, - prefix=parsed_args.prefix, - image=image_name, - tag=parsed_args.tag, - name=image_parsed_name, - ) - - # NOTE(cloudnull): Check if the reference config has a valid - # "from" option. If the reference "from" - # option is valid, it will be used. - image_config["tcib_from"] = image_config.get( - "tcib_from", image_from - ) - - tcib_inventory_hosts[image_parsed_name] = image_config - - var_file = "{image_name}.yaml".format( - image_name=os.path.join( - image_config["tcib_path"], image_parsed_name, - ) - ) - utils.makedirs(os.path.dirname(var_file)) - with open(var_file, "w") as f: - yaml.safe_dump( - image_config, f, default_flow_style=False, width=4096 - ) - - with utils.TempDirs() as tmp: - playbook = os.path.join(tmp, "tripleo-multi-playbook.yaml") - playdata = [ - { - "name": "Generate localhost facts", - "connection": "local", - "hosts": "localhost", - "gather_facts": True, - } - ] - generation_playbook = { - "name": "Generate container file(s)", - "connection": "local", - "hosts": "all", - "gather_facts": False, - "roles": [{"role": "tripleo_container_image_build"}], - } - if parsed_args.extra_config: - if not os.path.exists(parsed_args.extra_config): - raise IOError( - "The file provided does not " - "exist, check your settings and try again."
- ) - else: - with open(parsed_args.extra_config) as f: - generation_playbook["vars"] = yaml.safe_load(f) - - playdata.append(generation_playbook) - - with open(playbook, "w") as f: - yaml.safe_dump( - playdata, f, default_flow_style=False, width=4096 - ) - - utils.run_ansible_playbook( - playbook=playbook, - inventory=tcib_inventory, - workdir=tmp, - playbook_dir=tmp, - extra_env_variables={ - "ANSIBLE_FORKS": len(tcib_inventory_hosts.keys()), - "ANSIBLE_PYTHON_INTERPRETER": "{{ansible_playbook_python}}" - }, - verbosity=utils.playbook_verbosity(self=self), - ) - - # Ensure anything not intended to be built is excluded - excludes.extend(self.rectify_excludes(images_to_prepare)) - self.log.info("Images being excluded: {}".format(excludes)) - volumes = parsed_args.volumes - volumes.append( - f"{parsed_args.repo_dir}:/etc/distro.repos.d:z" - ) - - if not parsed_args.skip_build: - bb = buildah.BuildahBuilder( - work_dir=work_dir, - deps=images_tree, - base=parsed_args.prefix, - img_type=False, - tag=parsed_args.tag, - namespace=parsed_args.namespace, - registry_address=parsed_args.registry, - push_containers=parsed_args.push, - volumes=volumes, - excludes=list(set(excludes)), - build_timeout=parsed_args.build_timeout, - debug=self.app.options.debug - ) - try: - bb.build_all() - except SystemError as exp: - self.log.error( - "Buildah failed with the following error: {}".format(exp) - ) - raise SystemError(exp) - - -class HotFix(command.Command): - """Hotfix tripleo container images with tripleo-ansible.""" - - log = logging.getLogger(__name__ + ".HotFix") - - def get_parser(self, prog_name): - parser = super(HotFix, self).get_parser(prog_name) - parser.add_argument( - "--image", - dest="images", - metavar="", - default=[], - action="append", - required=True, - help=_( - "Fully qualified reference to the source image to be " - "modified. Can be specified multiple times (one per " - "image) (default: %(default)s)." 
- ), - ) - parser.add_argument( - "--rpms-path", - dest="rpms_path", - metavar="", - required=True, - help=_("Path containing RPMs to install."), - ) - parser.add_argument( - "--tag", - dest="tag", - metavar="", - default="latest", - help=_("Image hotfix tag (default: %(default)s)"), - ) - return parser - - def take_action(self, parsed_args): - logging.register_options(CONF) - logging.setup(CONF, '') - with utils.TempDirs() as tmp: - tasks = list() - for image in parsed_args.images: - tasks.append( - { - "name": "include ansible-role-tripleo-modify-image", - "import_role": {"name": "tripleo-modify-image"}, - "vars": { - "container_build_tool": "buildah", - "tasks_from": "rpm_install.yml", - "source_image": image, - "rpms_path": parsed_args.rpms_path, - "modified_append_tag": "-{}".format( - parsed_args.tag - ), - "modify_dir_path": tmp, - }, - } - ) - - playbook = os.path.join(tmp, "tripleo-hotfix-playbook.yaml") - playdata = { - "name": "Generate hotfixes", - "connection": "local", - "hosts": "localhost", - "gather_facts": False, - "tasks": tasks, - } - - with open(playbook, "w") as f: - yaml.safe_dump( - [playdata], f, default_flow_style=False, width=4096 - ) - - self.log.debug("Running ansible playbook {}".format(playbook)) - utils.run_ansible_playbook( - playbook=playbook, - inventory="localhost", - workdir=tmp, - playbook_dir=tmp, - verbosity=utils.playbook_verbosity(self=self), - ) diff --git a/tripleoclient/workflows/__init__.py b/tripleoclient/workflows/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleoclient/workflows/baremetal.py b/tripleoclient/workflows/baremetal.py deleted file mode 100644 index cf18c8773..000000000 --- a/tripleoclient/workflows/baremetal.py +++ /dev/null @@ -1,654 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import socket -import netaddr -import tempfile - -import ironic_inspector_client -from oslo_concurrency import processutils -from oslo_utils import units -from tripleo_common import exception as tc_exceptions -from tripleo_common.utils import nodes as node_utils - -from tripleoclient import constants -from tripleoclient import exceptions -from tripleoclient import utils - -LOG = logging.getLogger(__name__) - - -def validate_nodes(clients, nodes_json): - """Validate nodes. - - :param clients: Application client object. - :type clients: Object - - :param nodes_json: List of nodes and attributes in JSON format. - :type nodes_json: Object - - :returns: Boolean - """ - validated_nodes = node_utils.validate_nodes(nodes_json) - if not validated_nodes: - return True - raise exceptions.RegisterOrUpdateError(validated_nodes) - - -def register_or_update(clients, nodes_json, kernel_name=None, - ramdisk_name=None, instance_boot_option=None, - boot_mode=None): - """Node Registration or Update - - :param clients: Application client object.
- :type clients: Object - - :param nodes_json: - :type nodes_json: Object - - :param kernel_name: Kernel to use - :type kernel_name: String - - :param ramdisk_name: RAMDISK to use - :type ramdisk_name: String - - :param instance_boot_option: Whether to set instances for booting from - local hard drive (local) or network - (netboot). - :type instance_boot_option: String - :param boot_mode: Whether to set the boot mode to UEFI (uefi) or legacy - BIOS (bios) - :type boot_mode: String - - :returns: List - """ - - for node in nodes_json: - caps = node.get('capabilities', {}) - caps = node_utils.capabilities_to_dict(caps) - if instance_boot_option: - caps.setdefault('boot_option', instance_boot_option) - if boot_mode: - caps.setdefault('boot_mode', boot_mode) - node['capabilities'] = node_utils.dict_to_capabilities(caps) - - registered_nodes = node_utils.register_all_nodes( - nodes_json, - client=clients.baremetal, - kernel_name=kernel_name, - ramdisk_name=ramdisk_name) - if not isinstance(registered_nodes, list): - raise exceptions.RegisterOrUpdateError(registered_nodes) - else: - for node in registered_nodes: - if node.provision_state == 'enroll': - clients.baremetal.node.set_provision_state( - node_uuid=node.uuid, - state='manage' - ) - print('Successfully registered node UUID {}'.format(node.uuid)) - else: - print('Node UUID {} is already registered'.format(node.uuid)) - - return registered_nodes - - -def introspect(clients, node_uuids, run_validations, concurrency, - node_timeout, max_retries, retry_timeout, verbosity=0): - """Introspect Baremetal Nodes - - :param clients: Application client object. - :type clients: Object - - :param node_uuids: List of instance UUID(s). - :type node_uuids: List - - :param run_validations: Enable or disable validations - :type run_validations: Boolean - - :param concurrency: concurrency level - :type concurrency: Integer - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-baremetal-introspect.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=verbosity, - extra_vars={ - "node_uuids": node_uuids, - "run_validations": run_validations, - "concurrency": concurrency, - "node_timeout": node_timeout, - "max_retries": max_retries, - "retry_timeout": retry_timeout, - - } - ) - - print('Successfully introspected nodes: {}'.format(node_uuids)) - - -def introspect_manageable_nodes(clients, run_validations, concurrency, - node_timeout, max_retries, retry_timeout, - verbosity=0): - """Introspect all manageable nodes - - :param clients: Application client object. 
- :type clients: Object - - :param run_validations: Enable or disable validations - :type run_validations: Boolean - - :param concurrency: Concurrency level - :type concurrency: Integer - - :param node_timeout: Node timeout for introspection - :type node_timeout: Integer - - :param max_retries: Max retries for introspection - :type max_retries: Integer - - :param retry_timeout: Max timeout to wait between retries - :type retry_timeout: Integer - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - introspect( - clients=clients, - node_uuids=[ - i.uuid for i in clients.baremetal.node.list() - if i.provision_state == "manageable" and not i.maintenance - ], - run_validations=run_validations, - concurrency=concurrency, - node_timeout=node_timeout, - max_retries=max_retries, - retry_timeout=retry_timeout, - verbosity=verbosity - ) - - -def _configure_boot(clients, node_uuid, - kernel_name=None, - ramdisk_name=None, - instance_boot_option=None, - boot_mode=None): - baremetal_client = clients.baremetal - image_ids = {'kernel': kernel_name, 'ramdisk': ramdisk_name} - node = baremetal_client.node.get(node_uuid) - capabilities = node.properties.get('capabilities', {}) - capabilities = node_utils.capabilities_to_dict(capabilities) - if instance_boot_option is not None: - capabilities['boot_option'] = instance_boot_option - if boot_mode is not None: - capabilities['boot_mode'] = boot_mode - capabilities = node_utils.dict_to_capabilities(capabilities) - - baremetal_client.node.update(node.uuid, [ - { - 'op': 'add', - 'path': '/properties/capabilities', - 'value': capabilities, - }, - { - 'op': 'add', - 'path': '/driver_info/deploy_ramdisk', - 'value': image_ids['ramdisk'], - }, - { - 'op': 'add', - 'path': '/driver_info/deploy_kernel', - 'value': image_ids['kernel'], - }, - { - 'op': 'add', - 'path': '/driver_info/rescue_ramdisk', - 'value': image_ids['ramdisk'], - }, - { - 'op': 'add', - 'path': '/driver_info/rescue_kernel', - 'value': image_ids['kernel'], - }, - ]) - - -def _apply_root_device_strategy(clients, node_uuid, strategy, - minimum_size=4, overwrite=False): - node = clients.baremetal.node.get(node_uuid) - if node.properties.get('root_device') and not overwrite: - # This is a correct situation, we still want to allow people to - # fine-tune the root device setting for a subset of nodes. - # However, issue a warning, so that they know which nodes were not - # updated during this run. 
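As an aside, the capability handling in `_configure_boot()` above hinges on Ironic's `"key1:value1,key2:value2"` capability string format. The real helpers live in `tripleo_common.utils.nodes`; the sketch below is an illustrative reimplementation of the round-trip, not the actual library code:

```python
# Illustrative reimplementation only -- approximates the behaviour of
# tripleo_common's capabilities_to_dict()/dict_to_capabilities() helpers
# for Ironic's "k1:v1,k2:v2" capability strings.
def capabilities_to_dict(caps):
    if not caps:
        return {}
    if isinstance(caps, dict):
        return caps
    return dict(item.split(':', 1) for item in caps.split(','))


def dict_to_capabilities(caps_dict):
    return ','.join('{}:{}'.format(k, v) for k, v in caps_dict.items())


caps = capabilities_to_dict('boot_option:local')
caps['boot_mode'] = 'uefi'
print(dict_to_capabilities(caps))  # boot_option:local,boot_mode:uefi
```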
- LOG.warning('Root device hints are already set for node %s ' - 'and overwriting is not requested, skipping', - node.uuid) - LOG.warning('You may unset them by running $ openstack baremetal node ' - 'unset --properties root_device %s', - node.uuid) - return - - inspector_client = clients.baremetal_introspection - baremetal_client = clients.baremetal - try: - data = inspector_client.get_data(node.uuid) - except ironic_inspector_client.ClientError: - raise exceptions.RootDeviceDetectionError( - 'No introspection data found for node %s, ' - 'root device cannot be detected' % node.uuid) - except AttributeError: - raise RuntimeError('Ironic inspector client version 1.2.0 or ' - 'newer is required for detecting root device') - - try: - disks = data['inventory']['disks'] - except KeyError: - raise exceptions.RootDeviceDetectionError( - 'Malformed introspection data for node %s: ' - 'disks list is missing' % node.uuid) - - minimum_size *= units.Gi - disks = [d for d in disks if d.get('size', 0) >= minimum_size] - - if not disks: - raise exceptions.RootDeviceDetectionError( - 'No suitable disks found for node %s' % node.uuid) - - for disk in disks: - # NOTE(TheJulia): An md device should not be explicitly forced. - # If software RAID is in use, Ironic knows exactly what it is doing. - if 'md' in disk['name']: - LOG.warning('A "md" device %(md)s, signifying software RAID, ' - 'has been detected. A root device hint should not ' - 'normally need to be set when the software RAID is ' - 'being managed by Ironic, unless the operator knows ' - 'better due to site configuration. ' - 'Unfortunately, we cannot guess a ' - 'root device hint when Ironic is managing a ' - 'software RAID device. If this is in error, ' - 'please set an explicit root device hint using ' - '$ openstack baremetal node set --property ' - 'root_device=/dev/', - {'md': disk['name']}) - return - - if strategy == 'smallest': - # NOTE(TheJulia): This is redundant, Ironic does this by default, - # and maintains a list of invalid devices which show up in the - # introspection data and cannot be used, such as flash cards. - disks.sort(key=lambda d: d['size']) - root_device = disks[0] - elif strategy == 'largest': - disks.sort(key=lambda d: d['size'], reverse=True) - root_device = disks[0] - else: - disk_names = [x.strip() for x in strategy.split(',')] - disks = {d['name']: d for d in disks} - for candidate in disk_names: - try: - root_device = disks['/dev/%s' % candidate] - except KeyError: - continue - else: - break - else: - raise exceptions.RootDeviceDetectionError( - 'Cannot find a disk with any of the names %(strategy)s ' - 'for node %(node)s' % - {'strategy': strategy, 'node': node.uuid}) - - hint = None - for hint_name in ('wwn_with_extension', 'wwn', 'serial'): - if root_device.get(hint_name): - hint = {hint_name: root_device[hint_name]} - break - - if hint is None: - # This is not expected to actually happen, but just in case - raise exceptions.RootDeviceDetectionError( - 'Neither WWN nor serial number is known for device %(dev)s ' - 'on node %(node)s; root device hints cannot be used' % - {'dev': root_device['name'], 'node': node.uuid}) - - # During the introspection process we got local_gb assigned according - # to the default strategy. Now we need to update it. - new_size = root_device['size'] / units.Gi - # This -1 is what we always do to account for partitioning - new_size -= 1 - - # NOTE(TheJulia): local_gb is only used for partition images, - # and is ignored with Whole Disk Images.
With movement to Whole - # Disk images, this is tech debt and should be removed at some point. - baremetal_client.node.update( - node.uuid, - [{'op': 'add', 'path': '/properties/root_device', 'value': hint}, - {'op': 'add', 'path': '/properties/local_gb', 'value': new_size}]) - - LOG.info('Updated root device for node %(node)s, new device ' - 'is %(dev)s, new local_gb is %(local_gb)d', - {'node': node.uuid, 'dev': root_device, 'local_gb': new_size}) - - -def create_raid_configuration(clients, node_uuids, configuration, - verbosity=0): - """Create RAID configuration on nodes. - - :param clients: application client object. - :type clients: Object - - :param node_uuids: List of instance UUID(s). - :type node_uuids: List - - :param configuration: RAID configuration object. - :type configuration: Object - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-baremetal-raid.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=verbosity, - extra_vars={ - 'node_uuids': node_uuids, - 'raid_configuration': configuration - } - ) - - print('Successfully configured RAID for nodes: {}'.format(node_uuids)) - - -def _existing_ips(existing_nodes): - result = set() - for node in existing_nodes: - try: - handler = node_utils.find_driver_handler(node['driver']) - except tc_exceptions.InvalidNode: - LOG.warning('No known handler for driver %(driver)s of ' - 'node %(node)s, ignoring it', - {'driver': node['driver'], 'node': node['uuid']}) - continue - - address_field = handler.convert_key('pm_addr') - if address_field is None: - LOG.info('No address field for driver %(driver)s of ' - 'node %(node)s, ignoring it', - {'driver': node['driver'], 'node': node['uuid']}) - continue - - address = node['driver_info'].get(address_field) - if address is None: - LOG.warning('No address for node %(node)s, ignoring it', - {'node': node['uuid']}) - continue - - try: - ip = socket.gethostbyname(address) - except socket.gaierror as exc: - LOG.warning('Cannot resolve %(field)s "%(value)s" ' - 'for node %(node)s: %(error)s', - {'field': address_field, 'value': address, - 'node': node['uuid'], 'error': exc}) - continue - - port_field = handler.convert_key('pm_port') - port = node['driver_info'].get(port_field, handler.default_port) - if port is not None: - port = int(port) - - LOG.debug('Detected existing BMC at %s with port %s', ip, port) - result.add((ip, port)) - - return result - - -def _ip_address_list(ip_addresses): - if isinstance(ip_addresses, str): - return [str(ip) for ip in - netaddr.IPNetwork(ip_addresses).iter_hosts()] - return ip_addresses - - -def _get_candidate_nodes(ip_addresses, ports, - credentials, existing_nodes): - existing = _existing_ips(existing_nodes) - try: - ip_addresses = _ip_address_list(ip_addresses) - except netaddr.AddrFormatError as exc: - LOG.error("Cannot parse network address: %s", exc) - raise - - result = [] - # NOTE(dtantsur): we iterate over IP addresses last to avoid - # spamming the same BMC with too many requests in a row. 
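To make the `_ip_address_list()` helper above concrete, here is a minimal usage sketch (assuming the `netaddr` library is installed); `iter_hosts()` expands a CIDR into usable host addresses, excluding the network and broadcast addresses for IPv4:

```python
# Minimal sketch of the CIDR expansion performed by _ip_address_list().
import netaddr


def ip_address_list(ip_addresses):
    # A string is treated as a CIDR and expanded; a list is passed through.
    if isinstance(ip_addresses, str):
        return [str(ip) for ip in netaddr.IPNetwork(ip_addresses).iter_hosts()]
    return ip_addresses


print(ip_address_list('192.0.2.8/29'))
# ['192.0.2.9', '192.0.2.10', '192.0.2.11', '192.0.2.12', '192.0.2.13',
#  '192.0.2.14']
```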
- for username, password in credentials: - for port in ports: - port = int(port) - for ip in ip_addresses: - if (ip, port) in existing or (ip, None) in existing: - LOG.info('Skipping existing node %s:%s', ip, port) - continue - - result.append({'ip': ip, 'username': username, - 'password': password, 'port': port}) - - return result - - -def _probe_node(ip, port, username, password, - attempts=2, ipmi_driver='ipmi'): - # TODO(dtantsur): redfish support - LOG.debug('Probing for IPMI BMC: %s@%s:%s', - username, ip, port) - - with tempfile.NamedTemporaryFile(mode='wt') as fp: - fp.write(password or '\0') - fp.flush() - - try: - # TODO(dtantsur): try also IPMI v1.5 - processutils.execute('ipmitool', '-I', 'lanplus', - '-H', ip, '-L', 'ADMINISTRATOR', - '-p', str(port), '-U', username, - '-f', fp.name, 'power', 'status', - attempts=attempts) - except processutils.ProcessExecutionError as exc: - LOG.debug('Probing %(ip)s failed: %(exc)s', - {'ip': ip, 'exc': exc}) - return None - - LOG.info('Found a BMC on %(ip)s with user %(user)s', - {'ip': ip, 'user': username}) - return { - 'pm_type': ipmi_driver, - 'pm_addr': ip, - 'pm_user': username, - 'pm_password': password, - 'pm_port': port, - } - - -def discover_and_enroll(clients, ip_addresses, credentials, kernel_name, - ramdisk_name, instance_boot_option, - existing_nodes=None, ports=None): - """Discover nodes and enroll baremetal nodes. - - :param clients: application client object. - :type clients: Object - - :param ip_addresses: List of IP addresses. - :type ip_addresses: List || String - - :param credentials: Credential information object - :type credentials: Tuple - - :param kernel_name: Kernel to use - :type kernel_name: String - - :param ramdisk_name: RAMDISK to use - :type ramdisk_name: String - - :param instance_boot_option: Boot options to use - :type instance_boot_option: String - - :param existing_nodes: List of nodes already discovered. If this is - undefined, this object will be set to an empty - list. - :type existing_nodes: List - - :param ports: List of ports, if no ports are provided the list of ports - will be limited to [623]. - :type ports: List - - :returns: List - """ - - if not ports: - ports = [623] - - if not existing_nodes: - existing_nodes = list() - - candidate_nodes = _get_candidate_nodes( - ip_addresses, - ports, - credentials, - existing_nodes - ) - probed_nodes = list() - for node in candidate_nodes: - probed_nodes.append(_probe_node(**node)) - print('Finished probing node IP {}'.format(node['ip'])) - - return register_or_update( - clients=clients, - nodes_json=probed_nodes, - instance_boot_option=instance_boot_option, - kernel_name=kernel_name, - ramdisk_name=ramdisk_name - ) - - -def apply_bios_configuration(node_uuids, configuration, verbosity=0): - """Apply BIOS settings on nodes. - - :param node_uuids: List of instance UUID(s). - :type node_uuids: List - - :param configuration: BIOS configuration object.
- :type configuration: Object - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - print('Applying BIOS settings for given nodes, this may take time') - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-baremetal-bios.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=verbosity, - extra_vars={ - 'node_uuids': node_uuids, - 'bios_configuration': configuration - } - ) - - print('Successfully applied the BIOS for nodes: {}'.format(node_uuids)) - - -def apply_bios_configuration_on_manageable_nodes(clients, configuration, - verbosity=0): - """Apply BIOS settings on manageable nodes. - - :param clients: application client object. - :type clients: Object - - :param configuration: BIOS configuration object. - :type configuration: Object - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - apply_bios_configuration( - node_uuids=[ - i.uuid for i in clients.baremetal.node.list() - if i.provision_state == "manageable" and not i.maintenance - ], - configuration=configuration, - verbosity=verbosity - ) - - -def reset_bios_configuration(node_uuids, verbosity=0): - """Reset BIOS settings on nodes. - - :param node_uuids: List of instance UUID(s). - :type node_uuids: List - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - with utils.TempDirs() as tmp: - utils.run_ansible_playbook( - playbook='cli-baremetal-bios-reset.yaml', - inventory='localhost,', - workdir=tmp, - playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=verbosity, - extra_vars={ - 'node_uuids': node_uuids - } - ) - - print('Successfully reset the BIOS for nodes: {}'.format(node_uuids)) - - -def reset_bios_configuration_on_manageable_nodes(clients, verbosity=0): - """Reset BIOS settings on manageable nodes. - - :param clients: application client object. - :type clients: Object - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - reset_bios_configuration( - node_uuids=[ - i.uuid for i in clients.baremetal.node.list() - if i.provision_state == "manageable" and not i.maintenance - ], - verbosity=verbosity - ) diff --git a/tripleoclient/workflows/deployment.py b/tripleoclient/workflows/deployment.py deleted file mode 100644 index 2b3fcc1cb..000000000 --- a/tripleoclient/workflows/deployment.py +++ /dev/null @@ -1,528 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
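A pattern worth noting before this next module: every workflow in baremetal.py above (and several in deployment.py below) funnels its work into `utils.run_ansible_playbook()` from a throwaway directory. A sketch of the shape such a workflow takes (the playbook name here is hypothetical):

```python
# Pattern sketch only: mirrors the calling convention used by the
# workflows above; 'cli-example.yaml' is a hypothetical playbook name.
from tripleoclient import constants
from tripleoclient import utils


def example_workflow(node_uuids, verbosity=0):
    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbook='cli-example.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=verbosity,
            extra_vars={'node_uuids': node_uuids},
        )
```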
- -import copy -import getpass -import git -import os -import shutil -import yaml - -from openstackclient import shell -from tripleo_common.utils import heat as tc_heat_utils -from tripleo_common.utils import overcloudrc as rc_utils - -from tripleoclient.constants import ANSIBLE_TRIPLEO_PLAYBOOKS -from tripleoclient.constants import CLOUD_HOME_DIR -from tripleoclient.constants import DEFAULT_WORK_DIR -from tripleoclient import exceptions -from tripleoclient import utils - - -_WORKFLOW_TIMEOUT = 360 # 6 * 60 seconds - - -def create_overcloudrc(stack_name, endpoint, admin_vip, rc_params, - no_proxy='', output_dir=CLOUD_HOME_DIR): - overcloudrcs = rc_utils._create_overcloudrc_from_outputs( - stack_name, endpoint, admin_vip, no_proxy, rc_params['password'], - rc_params['region']) - rcpath = os.path.join(output_dir, '%src' % stack_name) - with open(rcpath, 'w') as rcfile: - rcfile.write(overcloudrcs['overcloudrc']) - os.chmod(rcpath, 0o600) - return os.path.abspath(rcpath) - - -def deploy_without_plan(clients, stack_name, template, - files, env_files, - log, - working_dir): - orchestration_client = clients.orchestration - log.info("Performing Heat stack create") - marker = None - set_deployment_status(stack_name, - status='DEPLOYING', - working_dir=working_dir) - stack_args = { - 'stack_name': stack_name, - 'template': template, - 'environment_files': env_files, - 'files': files} - try: - orchestration_client.stacks.create(**stack_args) - print("Success.") - except Exception: - set_deployment_status(stack_name, - status='DEPLOY_FAILED', - working_dir=working_dir) - raise - - create_result = utils.wait_for_stack_ready( - orchestration_client, stack_name, marker) - if not create_result: - shell.OpenStackShell().run( - ["stack", "failures", "list", '--long', stack_name]) - set_deployment_status( - stack_name, - status='DEPLOY_FAILED', - working_dir=working_dir - ) - raise exceptions.DeploymentError("Heat Stack create failed.") - - -def get_overcloud_hosts(stack, ssh_network, working_dir): - ips = [] - role_net_ip_map = utils.get_role_net_ip_map(working_dir) - excluded_ips = utils.get_excluded_ip_addresses(working_dir) - if not role_net_ip_map: - raise exceptions.DeploymentError( - 'No overcloud hosts were found in the current stack.' - ' Check the stack name and try again.' - ) - for net_ip_map in role_net_ip_map.values(): - # get a copy of the lists of ssh_network and ctlplane ips - # as excluded_ips will only be the ctlplane ips, we need - # both lists to determine which to actually exclude - net_ips = copy.copy(net_ip_map.get(ssh_network, [])) - ctlplane_ips = copy.copy(net_ip_map.get('ctlplane', [])) - - excluded_ctlplane_ips = \ - [ip for ip in ctlplane_ips if ip in excluded_ips] - - # for each excluded ctlplane ip, remove the corresponding - # ssh_network ip at that same index in the net_ips list - for bcip in excluded_ctlplane_ips: - if not bcip: - continue - index = ctlplane_ips.index(bcip) - ctlplane_ips.pop(index) - net_ips.pop(index) - - ips.extend(net_ips) - - # ensure there are no empty strings in IP list (LP1990566) - ips = [i for i in ips if i] - return ips - - -def get_hosts_and_enable_ssh_admin(stack_name, overcloud_ssh_network, - overcloud_ssh_user, overcloud_ssh_key, - overcloud_ssh_port_timeout, - working_dir, verbosity=0, - heat_type='pod'): - """Enable ssh admin access. - - Get a list of hosts from a given stack and enable admin ssh across all of - them. - - :param stack_name: Stack name. - :type stack_name: String - - :param overcloud_ssh_network: Network id. 
- :type overcloud_ssh_network: String - - :param overcloud_ssh_user: SSH access username. - :type overcloud_ssh_user: String - - :param overcloud_ssh_key: SSH access key. - :type overcloud_ssh_key: String - - :param overcloud_ssh_port_timeout: Ansible connection timeout in seconds - :type overcloud_ssh_port_timeout: Int - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - hosts = get_overcloud_hosts(stack_name, overcloud_ssh_network, working_dir) - if [host for host in hosts if host]: - enable_ssh_admin( - stack_name, - hosts, - overcloud_ssh_user, - overcloud_ssh_key, - overcloud_ssh_port_timeout, - working_dir, - verbosity=verbosity, - heat_type=heat_type - ) - else: - raise exceptions.DeploymentError( - 'Cannot find any hosts on "{}" in network "{}"'.format( - stack_name, - overcloud_ssh_network - ) - ) - - -def enable_ssh_admin(stack_name, hosts, ssh_user, ssh_key, timeout, - working_dir, verbosity=0, heat_type='pod'): - """Run enable ssh admin access playbook. - - :param stack_name: Stack name. - :type stack_name: String - - :param hosts: Machines to connect to. - :type hosts: List - - :param ssh_user: SSH access username. - :type ssh_user: String - - :param ssh_key: SSH access key. - :type ssh_key: String - - :param timeout: Ansible connection timeout in seconds - :type timeout: int - - :param verbosity: Verbosity level - :type verbosity: Integer - """ - - print( - 'Enabling ssh admin (tripleo-admin) for hosts: {}.' - '\nUsing ssh user "{}" for initial connection.' - '\nUsing ssh key at "{}" for initial connection.' - '\n\nStarting ssh admin enablement playbook'.format( - hosts, - ssh_user, - ssh_key - ) - ) - try: - if tc_heat_utils.heatclient: - tc_heat_utils.heatclient.save_environment() - playbook = 'cli-enable-ssh-admin.yaml' - ansible_work_dir = os.path.join( - working_dir, os.path.splitext(playbook)[0]) - utils.run_ansible_playbook( - playbook=playbook, - inventory=','.join(hosts), - workdir=ansible_work_dir, - playbook_dir=ANSIBLE_TRIPLEO_PLAYBOOKS, - key=ssh_key, - ssh_user=ssh_user, - verbosity=verbosity, - reproduce_command=True, - extra_vars={ - "ssh_user": ssh_user, - "ssh_servers": hosts, - 'tripleo_cloud_name': stack_name - }, - ansible_timeout=timeout - ) - finally: - if tc_heat_utils.heatclient: - tc_heat_utils.heatclient.restore_environment() - print("Enabling ssh admin - COMPLETE.") - - -def config_download(log, clients, stack_name, ssh_network='ctlplane', - output_dir=None, override_ansible_cfg=None, - timeout=600, verbosity=0, deployment_options=None, - in_flight_validations=False, - ansible_playbook_name='deploy_steps_playbook.yaml', - limit_hosts=None, extra_vars=None, inventory_path=None, - ssh_user='tripleo-admin', tags=None, skip_tags=None, - deployment_timeout=None, forks=None, working_dir=None, - denyed_hostnames=None): - """Run config download. - - :param log: Logging object - :type log: Object - - :param clients: openstack clients - :type clients: Object - - :param stack: Heat Stack object - :type stack: Object - - :param ssh_network: Network named used to access the overcloud. - :type ssh_network: String - - :param output_dir: Path to the output directory. - :type output_dir: String - - :param override_ansible_cfg: Ansible configuration file location. - :type override_ansible_cfg: String - - :param timeout: Ansible connection timeout in seconds. - :type timeout: Integer - - :param verbosity: Ansible verbosity level. - :type verbosity: Integer - - :param deployment_options: Additional deployment options. 
- :type deployment_options: Dictionary - - :param in_flight_validations: Enable or Disable inflight validations. - :type in_flight_validations: Boolean - - :param ansible_playbook_name: Name of the playbook to execute. - :type ansible_playbook_name: String - - :param limit_hosts: String of hosts to limit the current playbook to. - :type limit_hosts: String - - :param extra_vars: Set additional variables as a Dict or the absolute - path of a JSON or YAML file type. - :type extra_vars: Either a Dict or the absolute path of JSON or YAML - - :param inventory_path: Inventory file or path, if None is provided this - function will perform a lookup - :type inventory_path: String - - :param ssh_user: SSH user, defaults to tripleo-admin. - :type ssh_user: String - - :param tags: Ansible inclusion tags. - :type tags: String - - :param skip_tags: Ansible exclusion tags. - :type skip_tags: String - - :param deployment_timeout: Deployment timeout in minutes. - :type deployment_timeout: Integer - - :param working_dir: Consistent working directory used for generated - ansible files. - :type working_dir: String - """ - - def _log_and_print(message, logger, level='info', print_msg=True): - """Print and log a given message. - - :param message: Message to print and log. - :type message: String - - :param log: Logging object - :type log: Object - - :param level: Log level. - :type level: String - - :param print_msg: Print messages to stdout. - :type print_msg: Boolean - """ - - if print_msg: - print(message) - - log = getattr(logger, level) - log(message) - - if not output_dir: - output_dir = DEFAULT_WORK_DIR - - if not working_dir: - working_dir = utils.get_default_working_dir(stack_name) - - if not deployment_options: - deployment_options = dict() - - if not in_flight_validations: - if skip_tags: - skip_tags = 'opendev-validation,{}'.format(skip_tags) - else: - skip_tags = 'opendev-validation' - - playbook = 'cli-grant-local-access.yaml' - ansible_work_dir = os.path.join( - working_dir, os.path.splitext(playbook)[0]) - utils.run_ansible_playbook( - playbook=playbook, - inventory='localhost,', - workdir=ansible_work_dir, - playbook_dir=ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=verbosity, - reproduce_command=True, - extra_vars={ - 'access_path': output_dir, - 'execution_user': getpass.getuser() - } - ) - - _log_and_print( - message='Checking for excluded hosts from stack: {}'.format( - stack_name - ), - logger=log, - print_msg=(verbosity == 0) - ) - if not limit_hosts: - if denyed_hostnames: - limit_hosts = ( - ':'.join(['!{}'.format(i) for i in denyed_hostnames - if i])) - - _log_and_print( - message='Executing deployment playbook for stack: {}'.format( - stack_name - ), - logger=log, - print_msg=(verbosity == 0) - ) - - stack_work_dir = os.path.join(output_dir, stack_name) - if not inventory_path: - inventory_path = os.path.join(stack_work_dir, - 'inventory') - - if isinstance(ansible_playbook_name, list): - playbooks = [os.path.join(stack_work_dir, p) - for p in ansible_playbook_name] - else: - playbooks = os.path.join(stack_work_dir, ansible_playbook_name) - - utils.run_ansible_playbook( - playbook=playbooks, - inventory=inventory_path, - workdir=output_dir, - playbook_dir=stack_work_dir, - skip_tags=skip_tags, - tags=tags, - ansible_cfg=override_ansible_cfg, - verbosity=verbosity, - ssh_user=ssh_user, - key=utils.get_key(stack_name), - limit_hosts=limit_hosts, - ansible_timeout=timeout, - reproduce_command=True, - extra_env_variables={ - 'ANSIBLE_BECOME': True, - }, - extra_vars=extra_vars, - 
timeout=deployment_timeout, - forks=forks - ) - - _log_and_print( - message='Overcloud configuration completed for stack: {}'.format( - stack_name - ), - logger=log, - print_msg=(verbosity == 0) - ) - - snapshot_dir(stack_work_dir) - - -def snapshot_dir(directory): - """Git snapshot a directory - - :param directory: Directory to snapshot - :type directory: string - :returns: None - """ - if os.path.exists(directory): - # Get a handle to the git repository - repo = git.Repo(directory) - - # Configure git user.name and user.email - git_config_user = "tripleo-admin" - git_config_email = git_config_user + '@' + os.uname().nodename.strip() - repo.config_writer().set_value( - "user", "name", git_config_user - ).release() - repo.config_writer().set_value( - "user", "email", git_config_email - ).release() - - # Add and commit all files to the git repository - repo.git.add(".") - repo.git.commit("--amend", "--no-edit") - - -def get_horizon_url(stack, verbosity=0, - heat_type='pod', - working_dir=None): - """Return horizon URL string. - - :param stack: Stack name - :type stack: string - :returns: string - """ - - try: - if tc_heat_utils.heatclient: - tc_heat_utils.heatclient.save_environment() - playbook = 'cli-undercloud-get-horizon-url.yaml' - ansible_work_dir = os.path.join( - working_dir, os.path.splitext(playbook)[0]) - horizon_file = os.path.join(ansible_work_dir, 'horizon_url') - utils.run_ansible_playbook( - playbook=playbook, - inventory='localhost,', - workdir=ansible_work_dir, - playbook_dir=ANSIBLE_TRIPLEO_PLAYBOOKS, - verbosity=verbosity, - reproduce_command=True, - extra_vars={ - 'stack_name': stack, - 'horizon_url_output_file': horizon_file - } - ) - finally: - if tc_heat_utils.heatclient: - tc_heat_utils.heatclient.restore_environment() - - with open(horizon_file) as f: - return f.read().strip() - - -def get_deployment_status(clients, stack_name, working_dir): - """Return current deployment status.""" - try: - status_yaml = utils.get_status_yaml(stack_name, working_dir) - with open(status_yaml, 'r') as status_stream: - return yaml.safe_load(status_stream)['deployment_status'] - except Exception: - return None - - -def set_deployment_status(stack_name, status, working_dir): - utils.update_deployment_status( - stack_name=stack_name, - status=status, - working_dir=working_dir) - - -def make_config_download_dir(config_download_dir, stack): - utils.makedirs(config_download_dir) - utils.makedirs(DEFAULT_WORK_DIR) - # Symlink for the previous default config-download dir to the - # new consistent location.
- # This will create the following symlink: - # ~/config-download/ -> - # ~/overcloud-deploy//config-download/ - old_config_download_stack_dir = \ - os.path.join(DEFAULT_WORK_DIR, stack) - new_config_download_stack_dir = \ - os.path.join(config_download_dir, stack) - - if os.path.islink(old_config_download_stack_dir): - return - - # Migrate the old directory to the new, if the new does not yet exist - if (os.path.isdir(old_config_download_stack_dir) and - not os.path.exists(new_config_download_stack_dir)): - shutil.move(old_config_download_stack_dir, - new_config_download_stack_dir) - - # Remove everything at the old path - if os.path.exists(old_config_download_stack_dir): - shutil.rmtree(old_config_download_stack_dir, - ignore_errors=True) - - # Symlink the old path to the new tree for backwards compatibility - os.symlink(new_config_download_stack_dir, - old_config_download_stack_dir) diff --git a/tripleoclient/workflows/parameters.py b/tripleoclient/workflows/parameters.py deleted file mode 100644 index c70ec15b5..000000000 --- a/tripleoclient/workflows/parameters.py +++ /dev/null @@ -1,260 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import os -import re -import yaml - -from tripleo_common.utils import stack_parameters as stk_parameters - -from tripleoclient.constants import UNUSED_PARAMETER_EXCLUDES_RE -from tripleoclient import exceptions -from tripleoclient import utils -from tripleoclient.workflows import roles - - -LOG = logging.getLogger(__name__) - - -def check_deprecated_parameters(clients, stack_name, template, files, - env_files_tracker, working_dir): - """Checks for deprecated parameters and adds warning if present. - - :param clients: application client object. 
- :type clients: Object - :param stack_name: Heat stack name - :type stack_name: String - :param template: - :type template: String - :param files: - :type files: - :param env_files_tracker: - :type env_files_tracker: - :param working_dir: Tripleo working directory - :type working_dir: String - """ - - # Get role list - role_list = roles.get_roles(clients, stack_name, template, files, - env_files_tracker, working_dir, - detail=False, - valid=True) - - # Build stack_data - stack_data = utils.build_stack_data( - clients, stack_name, template, - files, env_files_tracker) - user_params = stack_data.get('environment_parameters', {}) - heat_resource_tree = stack_data.get('heat_resource_tree', {}) - heat_resource_tree_params = heat_resource_tree.get('parameters', {}) - heat_resource_tree_resources = heat_resource_tree.get('resources', {}) - all_params = heat_resource_tree_params.keys() - parameter_groups = [ - i.get('parameter_groups') - for i in heat_resource_tree_resources.values() - if i.get('parameter_groups') - ] - params_role_specific_tag = [ - i.get('name') - for i in heat_resource_tree_params.values() - if 'tags' in i and 'role_specific' in i['tags'] - ] - - r = re.compile(".*Count") - filtered_names = list(filter(r.match, all_params)) - valid_role_name_list = list() - for name in filtered_names: - default = heat_resource_tree_params[name].get('default', 0) - if default and int(default) > 0: - # NOTE: slice off the suffix; rstrip() strips characters, not - # a suffix, and would mangle role names ending in C/o/u/n/t. - role_name = name[:-len('Count')] - if [i for i in role_list if i == role_name]: - valid_role_name_list.append(role_name) - - deprecated_params = [ - i[0] for i in parameter_groups - if i[0].get('label') == 'deprecated' - ] - # We are setting a frozenset here because python 3 complains that dict is - # an unhashable type. - # On user_defined, we check if the size is higher than 0 because an empty - # frozenset is still a subset of a frozenset, so we can't use issubset - # here. - user_params_keys = frozenset(user_params.keys()) - deprecated_result = [ - { - 'parameter': i, - 'deprecated': True, - 'user_defined': len( - [x for x in frozenset(i) if x in user_params_keys]) > 0 - } - for i in deprecated_params - ] - unused_params = [i for i in user_params.keys() if i not in all_params] - user_provided_role_specific = [ - v for i in role_list - for k, v in user_params.items() - if k in i - ] - invalid_role_specific_params = [ - i for i in user_provided_role_specific - if i in params_role_specific_tag - ] - deprecated_parameters = [ - param['parameter'] for param in deprecated_result - if param.get('user_defined') - ] - - if deprecated_parameters: - deprecated_join = ', '.join(deprecated_parameters) - LOG.warning( - 'WARNING: The following parameter(s) are deprecated and still ' - 'defined. Deprecated parameters will be removed soon!' - ' {deprecated_join}'.format( - deprecated_join=deprecated_join - ) - ) - - # exclude our known params that may not be used - ignore_re = re.compile('|'.join(UNUSED_PARAMETER_EXCLUDES_RE)) - unused_params = [p for p in unused_params if not ignore_re.search(p)] - - if unused_params: - unused_join = ', '.join(unused_params) - LOG.warning( - 'WARNING: The following parameter(s) are defined but not ' - 'currently used. These parameters ' - 'may be valid but not in use due to the service or ' - 'deployment configuration.' - ' {unused_join}'.format( - unused_join=unused_join - ) - ) - - if invalid_role_specific_params: - invalid_join = ', '.join(invalid_role_specific_params) - LOG.warning( - 'WARNING: The following parameter(s) are not supported as ' - 'role-specific inputs.
{invalid_join}'.format( - invalid_join=invalid_join - ) - ) - - -def generate_fencing_parameters(nodes_json, delay, ipmi_level, - ipmi_cipher, ipmi_lanplus): - """Generate and return fencing parameters. - - :param nodes_json: list of nodes & attributes in json format - :type nodes_json: List - - :param delay: time to wait before taking fencing action - :type delay: Integer - - :param ipmi_level: IPMI user level to use - :type ipmi_level: String - - :param ipmi_cipher: IPMI cipher suite to use - :type ipmi_cipher: String - - :param ipmi_lanplus: whether to use IPMIv2.0 - :type ipmi_lanplus: Boolean - - :returns: Dictionary - """ - return stk_parameters.generate_fencing_parameters( - nodes_json=nodes_json, - delay=delay, - ipmi_level=ipmi_level, - ipmi_cipher=ipmi_cipher, - ipmi_lanplus=ipmi_lanplus) - - -def check_forbidden_params(log, env_files, forbidden): - """Looks for undesired parameters in the environment files. - - Each of the environment files passed in env_files will be parsed, - and if the parameter_defaults key is found, then all the keys - from the nested dictionary found under it will be converted into - a list, for example: - - parameter_defaults: - key1: value1 - key2: value2 - key3: - - value3 - - key31: - key311: value311 - key312: value312 - key32: value32 - - Will be converted by get_all_keys into: - [key1, key2, key3, key31, key311, key312, key32] - - This list provides us with all the parameters used in the environment - file, without the values, in the format of a list, so we can use sets - to find occurrences of the forbidden parameters. - - The variable matched_params stores all the occurrences of forbidden - parameters, so we can parse all the environment files and show - all the parameters which should be removed from the environment files - at once (saving the user from running the command, modifying a template, - running it again, modifying another, and so on). If the matched_params - list is not empty, an exception will be raised, stopping the execution - of the command and displaying the parameters which need to be removed. - - :param log: logging object passed from the calling method - :type log: Logging object - :param env_files: list of the environment files passed in the command - :type env_files: list of strings - :param forbidden: list of the undesired parameters - :type forbidden: list of strings - - :raises: BannedParameters if some of the forbidden parameters are found - in the environment files.
- """ - - # Iterates over a nested dict and returns all the - # keys from the dict in a list - # example: - # * input: {'a': '1', 'b': ['c': '2', 'd': {'e': '3'}]} - # * output: ['a', 'b', 'c', 'd', 'e'] - def get_all_keys(obj, keys_list): - if isinstance(obj, dict): - keys_list += obj.keys() - for value in obj.values(): - get_all_keys(value, keys_list) - elif isinstance(obj, list): - for value in obj: - get_all_keys(value, keys_list) - - matched_params = [] - - for file in env_files: - if os.path.exists(file): - with open(file, 'r') as env_file: - contents = yaml.safe_load(env_file) - pd = contents.get('parameter_defaults', {}) - if pd: - # Intersection of values and forbidden params - list_of_keys = [] - get_all_keys(pd, list_of_keys) - found_in_pd = list(set(list_of_keys) & set(forbidden)) - - # Combine them without duplicates - matched_params = list(set(matched_params + found_in_pd)) - - if matched_params: - raise exceptions.BannedParameters("The following parameters should be " - "removed from the environment files:" - "\n{}\n" - .format('\n'.join(matched_params))) diff --git a/tripleoclient/workflows/roles.py b/tripleoclient/workflows/roles.py deleted file mode 100644 index a784a2572..000000000 --- a/tripleoclient/workflows/roles.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from tripleoclient import utils - -LOG = logging.getLogger(__name__) - - -def get_roles(clients, - stack_name, - template, - files, - env_files, - working_dir, - detail=False, valid=False): - roles_data = utils.get_roles_data(working_dir, stack_name) - - if detail: - return roles_data - - role_names = [role['name'] for role in roles_data] - - if not valid: - return role_names - - stack_data = utils.build_stack_data( - clients, stack_name, template, - files, env_files) - - valid_roles = [] - for name in role_names: - role_count = stack_data['heat_resource_tree'][ - 'parameters'].get(name + 'Count', {}).get( - 'default', 0) - if role_count > 0: - valid_roles.append(name) - - return valid_roles diff --git a/tripleoclient/workflows/tripleo_baremetal.py b/tripleoclient/workflows/tripleo_baremetal.py deleted file mode 100644 index 4a75004f0..000000000 --- a/tripleoclient/workflows/tripleo_baremetal.py +++ /dev/null @@ -1,532 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -from typing import Dict -from typing import List - -from concurrent import futures -from openstack import connect as sdkclient -from openstack import exceptions -from openstack.utils import iterate_timeout -from oslo_utils import units -from tripleoclient import exceptions as ooo_exceptions -from tripleo_common.utils import nodes as node_utils - - -class TripleoBaremetal(object): - - """Base class for TripleO Baremetal operations. - - The TripleoBaremetal class provides access to commonly used elements - required to interact with and perform baremetal operations for TripleO. - - :param timeout: How long to wait until we consider this job to have - timed out - :type timeout: integer - - :param verbosity: How verbose should we be. Currently, this just sets - DEBUG for any non-zero value provided. - :type verbosity: integer - """ - - def __init__(self, timeout: int = 1200, verbosity: int = 1): - self.conn = sdkclient( - cloud='undercloud' - ) - self.timeout = timeout - self.log = logging.getLogger(__name__) - if verbosity > 0: - self.log.setLevel(logging.DEBUG) - - def all_manageable_nodes(self): - """This method returns a list of manageable nodes from Ironic - - We take no arguments and instead create a list of nodes that - are in the manageable state and NOT in maintenance. We return the - subsequent list. - - Raises: - NoNodeFound: If no nodes match the above description, we will raise - an exception. - - Returns: - nodes: The List of manageable nodes that are not currently in - maintenance. - """ - nodes = [n.id for n in self.conn.baremetal.nodes( - provision_state='manageable', is_maintenance=False)] - - if not nodes: - raise ooo_exceptions.NoNodeFound - - return nodes - - -class TripleoProvide(TripleoBaremetal): - - """TripleoProvide handles state transition of baremetal nodes. - - The TripleoProvide class handles the transition of nodes between the - manageable and available states. - - :param wait_for_bridge_mappings: Bool to determine whether or not we are - waiting for the bridge mapping to be - active in ironic-neutron-agent - :type wait_for_bridge_mappings: bool - - """ - - def __init__(self, wait_for_bridge_mappings: bool = False, - timeout: int = 60, verbosity: int = 1): - - super().__init__(timeout=timeout, verbosity=verbosity) - self.wait_for_bridge_mappings = wait_for_bridge_mappings - - def _wait_for_unlocked(self, node: str, timeout: int): - timeout_msg = f'Timeout waiting for node {node} to be unlocked' - - for count in iterate_timeout(timeout, timeout_msg): - node_info = self.conn.baremetal.get_node( - node, - fields=['reservation'] - ) - - if node_info.reservation is None: - return - - def _wait_for_bridge_mapping(self, node: str): - - client = self.conn.network - try: - node_id = self.conn.baremetal.find_node( - node, ignore_missing=False).id - except exceptions.ResourceNotFound: - self.log.error('Node with UUID: {} not found'.format(node)) - # Re-raise: node_id below would be undefined otherwise - raise - - timeout_msg = (f'Timeout waiting for node {node} to have ' - 'bridge_mappings set in the ironic-neutron-agent ' - 'entry') - - # default agent polling period is 30s, so wait 60s - timeout = 60 - - for count in iterate_timeout(timeout, timeout_msg): - agents = list( - client.agents(host=node_id, binary='ironic-neutron-agent')) - - if agents: - if agents[0].configuration.get('bridge_mappings'): - return - - def provide(self, nodes: List): - - """Transition nodes to the Available state.
-
-
-class TripleoProvide(TripleoBaremetal):
-
-    """TripleoProvide handles state transitions of baremetal nodes.
-
-    The TripleoProvide class handles the transition of nodes between the
-    manageable and available states.
-
-    :param wait_for_bridge_mappings: Bool to determine whether or not we
-                                     wait for the bridge mapping to be
-                                     active in ironic-neutron-agent
-    :type wait_for_bridge_mappings: bool
-    """
-
-    def __init__(self, wait_for_bridge_mappings: bool = False,
-                 timeout: int = 60, verbosity: int = 1):
-
-        super().__init__(timeout=timeout, verbosity=verbosity)
-        self.wait_for_bridge_mappings = wait_for_bridge_mappings
-
-    def _wait_for_unlocked(self, node: str, timeout: int):
-        timeout_msg = f'Timeout waiting for node {node} to be unlocked'
-
-        for count in iterate_timeout(timeout, timeout_msg):
-            node_info = self.conn.baremetal.get_node(
-                node,
-                fields=['reservation']
-            )
-
-            if node_info.reservation is None:
-                return
-
-    def _wait_for_bridge_mapping(self, node: str):
-        client = self.conn.network
-        try:
-            node_id = self.conn.baremetal.find_node(
-                node, ignore_missing=False).id
-        except exceptions.ResourceNotFound:
-            self.log.error('Node with UUID: {} not found'.format(node))
-            # node_id would be undefined below, so re-raise rather than
-            # continue into a NameError
-            raise
-
-        timeout_msg = (f'Timeout waiting for node {node} to have '
-                       'bridge_mappings set in the ironic-neutron-agent '
-                       'entry')
-
-        # default agent polling period is 30s, so wait 60s
-        timeout = 60
-
-        for count in iterate_timeout(timeout, timeout_msg):
-            agents = list(
-                client.agents(host=node_id, binary='ironic-neutron-agent'))
-
-            if agents:
-                if agents[0].configuration.get('bridge_mappings'):
-                    return
-
-    def provide(self, nodes: List):
-        """Transition nodes to the available state.
-
-        provide handles the state transition from the nodes' current state
-        to the available state.
-
-        :param nodes: The node UUIDs or names that we will be working on
-        :type nodes: List
-        """
-        client = self.conn.baremetal
-        node_timeout = self.timeout
-        nodes_wait = nodes[:]
-
-        for node in nodes:
-            self.log.info('Providing node: {}'.format(node))
-            self._wait_for_unlocked(node, node_timeout)
-
-            if self.wait_for_bridge_mappings:
-                self._wait_for_bridge_mapping(node)
-
-            try:
-                client.set_node_provision_state(
-                    node,
-                    "provide",
-                    wait=False)
-
-            except Exception as e:
-                nodes_wait.remove(node)
-                self.log.error(
-                    "Can not start providing for node {}: {}".format(
-                        node, e))
-                return
-
-        try:
-            self.log.info(
-                "Waiting for available state: {}".format(nodes_wait))
-
-            client.wait_for_nodes_provision_state(
-                nodes=nodes_wait,
-                expected_state='available',
-                timeout=self.timeout,
-                fail=False
-            )
-
-        except exceptions.ResourceFailure as e:
-            self.log.error("Failed providing nodes due to failure: "
-                           "{}".format(e))
-            return
-
-        except exceptions.ResourceTimeout as e:
-            self.log.error("Failed providing nodes due to timeout: "
-                           "{}".format(e))
-
-    def provide_manageable_nodes(self):
-        self.provide(self.all_manageable_nodes())
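A hedged usage sketch for the class above, as the module existed before this removal. The node names are hypothetical, and a configured 'undercloud' entry in clouds.yaml is assumed:

```python
from tripleoclient.workflows.tripleo_baremetal import TripleoProvide

# Hypothetical node names; in practice they come from
# `openstack baremetal node list` on the undercloud.
nodes = ['node-0', 'node-1']

provider = TripleoProvide(wait_for_bridge_mappings=True, timeout=1200)

# Waits for each node's Ironic lock (and bridge mappings) before
# issuing "provide", then waits for all nodes to reach "available".
provider.provide(nodes)

# Or sweep everything currently manageable and not in maintenance:
provider.provide_manageable_nodes()
```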
Error: {}".format( - node, err - )) - failed_nodes.append(node) - nodes.pop(nodes.index(node)) - workers = min(len(nodes), self.concurrency) or 1 - with futures.ThreadPoolExecutor(max_workers=workers) as executor: - future_to_build = { - executor.submit( - client.set_node_provision_state, - node, - "clean", - clean_steps=clean_steps, - wait=True - ): node for node in nodes - } - done, not_done = futures.wait( - future_to_build, - timeout=node_timeout, - return_when=futures.ALL_COMPLETED - ) - try: - self.log.info( - "Waiting for manageable state: {}".format(nodes)) - res = client.wait_for_nodes_provision_state( - nodes=nodes, - expected_state='manageable', - timeout=self.timeout, - fail=False - ) - except exceptions.ResourceFailure as e: - self.log.error("Failed providing nodes due to failure: {}".format( - e)) - except exceptions.ResourceTimeout as e: - self.log.error("Failed providing nodes due to timeout: {}".format( - e)) - finally: - err_nodes = [n.name for n in res if n.last_error] - s_nodes = [n.name for n in res if not n.last_error] - for node in err_nodes: - failed_nodes.append(node) - for node in s_nodes: - success_nodes.append(node) - - return(set(failed_nodes), set(success_nodes)) - - def clean_manageable_nodes(self): - self.clean(nodes=self.all_manageable_nodes()) - - def clean(self, nodes: List): - """clean manages the cleaning process for the Ironic nodes. - - Using the provided clean steps, this method will clean the provided - baremetal nodes. - - :param nodes: A list of nodes to clean - :type nodes: List - """ - if not nodes: - self.log.error("Provide either UUID or names of nodes!") - try: - failed_nodes, success_nodes = self._parallel_nodes_cleaning( - nodes) - if failed_nodes: - msg = ("Cleaning completed with failures. " - f"{failed_nodes} node(s) failed.") - self.log.error(msg) - else: - msg = ("Cleaning completed " - f"successfully: {len(success_nodes)} nodes") - self.log.info(msg) - except exceptions.OpenStackCloudException as err: - self.log.error(str(err)) - - -class TripleoConfigure(TripleoBaremetal): - - """TripleoConfigure handles properties for the ironic nodes. - - We use this class to set the properties for each node such as the - kernel, ramdisk, boot device, root_device. - - :param kernel_name: The name of the kernel image we will deploy - :type kernel_name: String - - :param ramdisk_name: The name of the ramdisk image we will deploy - :type ramdisk_name: String - - :param instance_boot: Should the node boot from local disks or something - else - :type instance_boot: String - - :param boot_mode: Is this node using BIOS or UEFI - :type boot_mode: String - - :param: root_device: What is the root device for this node. eg /dev/sda - :type root_device: String - - :param root_device_minimum_size: What is the smallest disk we should - consider acceptable for deployment - :type root_device: Integer - - :param overwrite_root_device_hints: Should we overwrite existing root - device hints when root_device is used. 
-
-
-class TripleoConfigure(TripleoBaremetal):
-
-    """TripleoConfigure handles properties for the Ironic nodes.
-
-    We use this class to set the properties for each node, such as the
-    kernel, ramdisk, boot device and root device.
-
-    :param kernel_name: The name of the kernel image we will deploy
-    :type kernel_name: String
-
-    :param ramdisk_name: The name of the ramdisk image we will deploy
-    :type ramdisk_name: String
-
-    :param instance_boot_option: Should the node boot from local disks or
-                                 something else
-    :type instance_boot_option: String
-
-    :param boot_mode: Is this node using BIOS or UEFI
-    :type boot_mode: String
-
-    :param root_device: What is the root device for this node,
-                        e.g. /dev/sda
-    :type root_device: String
-
-    :param root_device_minimum_size: What is the smallest disk we should
-                                     consider acceptable for deployment
-    :type root_device_minimum_size: Integer
-
-    :param overwrite_root_device_hints: Should we overwrite existing root
-                                        device hints when root_device is
-                                        used.
-    :type overwrite_root_device_hints: Boolean
-    """
-
-    log = logging.getLogger(__name__)
-
-    def __init__(self, kernel_name: str = None, ramdisk_name: str = None,
-                 instance_boot_option: str = None, boot_mode: str = None,
-                 root_device: str = None, verbosity: int = 0,
-                 root_device_minimum_size: int = 4,
-                 overwrite_root_device_hints: bool = False):
-
-        super().__init__(verbosity=verbosity)
-        self.kernel_name = kernel_name
-        self.ramdisk_name = ramdisk_name
-        self.instance_boot_option = instance_boot_option
-        self.boot_mode = boot_mode
-        self.root_device = root_device
-        self.root_device_minimum_size = root_device_minimum_size
-        self.overwrite_root_device_hints = overwrite_root_device_hints
-
-    def _apply_root_device_strategy(self, node_uuid: str,
-                                    strategy: str, minimum_size: int = 4,
-                                    overwrite: bool = False):
-        clients = self.conn
-        node = clients.baremetal.find_node(node_uuid)
-
-        if node.properties.get('root_device') and not overwrite:
-            # This is a correct situation, we still want to allow people to
-            # fine-tune the root device setting for a subset of nodes.
-            # However, issue a warning, so that they know which nodes were
-            # not updated during this run.
-            self.log.warning('Root device hints are already set for node '
-                             '{} and overwriting is not requested,'
-                             ' skipping'.format(node.id))
-            self.log.warning('You may unset them by running $ ironic '
-                             'node-update {} remove '
-                             'properties/root_device'.format(node.id))
-            return
-
-        inspector_client = self.conn.baremetal_introspection
-        baremetal_client = self.conn.baremetal
-
-        try:
-            data = inspector_client.get_introspection_data(node.id)
-        except Exception:
-            # RootDeviceDetectionError lives in tripleoclient's own
-            # exceptions module, not in openstack.exceptions
-            raise ooo_exceptions.RootDeviceDetectionError(
-                f'No introspection data found for node {node.id}, '
-                'root device cannot be detected')
-        try:
-            disks = data['inventory']['disks']
-        except KeyError:
-            raise ooo_exceptions.RootDeviceDetectionError(
-                f'Malformed introspection data for node {node.id}: '
-                'disks list is missing')
-
-        minimum_size *= units.Gi
-        disks = [d for d in disks if d.get('size', 0) >= minimum_size]
-
-        if not disks:
-            raise ooo_exceptions.RootDeviceDetectionError(
-                f'No suitable disks found for node {node.id}')
-
-        if strategy == 'smallest':
-            disks.sort(key=lambda d: d['size'])
-            root_device = disks[0]
-        elif strategy == 'largest':
-            disks.sort(key=lambda d: d['size'], reverse=True)
-            root_device = disks[0]
-        else:
-            disk_names = [x.strip() for x in strategy.split(',')]
-            disks = {d['name']: d for d in disks}
-            for candidate in disk_names:
-                try:
-                    root_device = disks['/dev/%s' % candidate]
-                except KeyError:
-                    continue
-                else:
-                    break
-            else:
-                raise ooo_exceptions.RootDeviceDetectionError(
-                    f'Cannot find a disk with any of names {strategy} '
-                    f'for node {node.id}')
-
-        hint = None
-
-        for hint_name in ('wwn_with_extension', 'wwn', 'serial'):
-            if root_device.get(hint_name):
-                hint = {hint_name: root_device[hint_name]}
-                break
-
-        if hint is None:
-            # This is unlikely to happen, but just in case
-            raise ooo_exceptions.RootDeviceDetectionError(
-                f"Neither WWN nor serial number are known for device "
-                f"{root_device['name']} "
-                f"on node {node.id}; root device hints cannot be used")
-
-        # During the introspection process we got local_gb assigned
-        # according to the default strategy. Now we need to update it.
-        new_size = root_device['size'] / units.Gi
-        # This -1 is what we always do to account for partitioning
-        new_size -= 1
-
-        baremetal_client.update_node(
-            node.id,
-            [{'op': 'add', 'path': '/properties/root_device',
-              'value': hint},
-             {'op': 'add', 'path': '/properties/local_gb',
-              'value': new_size}])
-        self.log.info('Updated root device for node %s, new device '
-                      'is %s, new local_gb is %s',
-                      node.id, root_device, new_size)
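The strategy handling in _apply_root_device_strategy boils down to a size sort plus a stable-identifier lookup. A self-contained sketch with made-up introspection data (disk sizes in bytes, as Ironic Inspector reports them):

```python
GiB = 1024 ** 3

# Hypothetical inventory; the real data comes from
# inspector_client.get_introspection_data(...)['inventory']['disks'].
disks = [
    {'name': '/dev/sda', 'size': 500 * GiB, 'serial': 'S1'},
    {'name': '/dev/sdb', 'size': 120 * GiB, 'wwn': '0x5000c500a1b2c3d4'},
]

# Drop disks under the 4 GiB minimum, then apply the 'smallest' strategy
candidates = [d for d in disks if d.get('size', 0) >= 4 * GiB]
root_device = min(candidates, key=lambda d: d['size'])

# Prefer the most specific stable identifier available as the hint
hint = None
for hint_name in ('wwn_with_extension', 'wwn', 'serial'):
    if root_device.get(hint_name):
        hint = {hint_name: root_device[hint_name]}
        break

print(hint)  # {'wwn': '0x5000c500a1b2c3d4'}
```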
-
-    def _configure_boot(self, node_uuid: str,
-                        kernel_name: str = None,
-                        ramdisk_name: str = None,
-                        instance_boot_option: str = None,
-                        boot_mode: str = None):
-
-        baremetal_client = self.conn.baremetal
-
-        image_ids = {'kernel': kernel_name, 'ramdisk': ramdisk_name}
-        node = baremetal_client.find_node(node_uuid)
-        capabilities = node.properties.get('capabilities', {})
-        capabilities = node_utils.capabilities_to_dict(capabilities)
-
-        if instance_boot_option is not None:
-            capabilities['boot_option'] = instance_boot_option
-        if boot_mode is not None:
-            capabilities['boot_mode'] = boot_mode
-
-        capabilities = node_utils.dict_to_capabilities(capabilities)
-        baremetal_client.update_node(node.id, [
-            {
-                'op': 'add',
-                'path': '/properties/capabilities',
-                'value': capabilities,
-            },
-            {
-                'op': 'add',
-                'path': '/driver_info/deploy_ramdisk',
-                'value': image_ids['ramdisk'],
-            },
-            {
-                'op': 'add',
-                'path': '/driver_info/deploy_kernel',
-                'value': image_ids['kernel'],
-            },
-            {
-                'op': 'add',
-                'path': '/driver_info/rescue_ramdisk',
-                'value': image_ids['ramdisk'],
-            },
-            {
-                'op': 'add',
-                'path': '/driver_info/rescue_kernel',
-                'value': image_ids['kernel'],
-            },
-        ])
-
-    def configure(self, node_uuids: List):
-        """Configure node boot options.
-
-        :param node_uuids: List of instance UUID(s).
-        :type node_uuids: List
-        """
-        for node_uuid in node_uuids:
-            self._configure_boot(node_uuid, self.kernel_name,
-                                 self.ramdisk_name,
-                                 self.instance_boot_option,
-                                 self.boot_mode)
-            if self.root_device:
-                self._apply_root_device_strategy(
-                    node_uuid,
-                    strategy=self.root_device,
-                    minimum_size=self.root_device_minimum_size,
-                    overwrite=self.overwrite_root_device_hints)
-
-        self.log.info('Successfully configured the nodes.')
-
-    def configure_manageable_nodes(self):
-        self.configure(node_uuids=self.all_manageable_nodes())
diff --git a/zuul.d/layout.yaml b/zuul.d/layout.yaml
deleted file mode 100644
index de6cafb1e..000000000
--- a/zuul.d/layout.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-- project:
-    templates:
-      - check-requirements
-      - openstack-cover-jobs
-      - openstackclient-plugin-jobs
-      - openstack-python3-zed-jobs
-      - publish-openstack-docs-pti
-      - release-notes-jobs-python3
-      - tripleo-buildimage-jobs
-      - tripleo-multinode-container-minimal-pipeline
-      - tripleo-undercloud-jobs-pipeline
-      - tripleo-standalone-scenarios-pipeline
-      - tripleo-upgrades-master-pipeline
-    check:
-      jobs:
-        - tripleo-ci-centos-9-content-provider:
-            dependencies:
-              - openstack-tox-pep8
-              - openstack-tox-py38
-              - openstack-tox-py39
-        - tripleo-ci-centos-9-standalone:
-            vars:
-              enable_validation: true
-              validation_component: validation
-              command: openstack tripleo validator
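_configure_boot above round-trips node capabilities through tripleo_common's nodes helpers, which convert between Ironic's 'k1:v1,k2:v2' capability strings and plain dicts. A minimal sketch of that behavior, written under the stated assumption about the string format rather than as the helpers' actual source:

```python
def capabilities_to_dict(caps):
    """Parse an Ironic capabilities string ('k1:v1,k2:v2') into a dict."""
    if not caps:
        return {}
    if isinstance(caps, dict):
        return caps
    return dict(item.split(':', 1) for item in caps.split(','))


def dict_to_capabilities(caps_dict):
    """Serialize a dict back into the capabilities string form."""
    return ','.join('{}:{}'.format(k, v)
                    for k, v in caps_dict.items() if v is not None)


caps = capabilities_to_dict('boot_option:local,profile:control')
caps['boot_mode'] = 'uefi'
print(dict_to_capabilities(caps))
# boot_option:local,profile:control,boot_mode:uefi
```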