diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 09fb4c5b9..000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = instack_undercloud -omit = instack_undercloud/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 1bf8d6ae2..000000000 --- a/.gitignore +++ /dev/null @@ -1,54 +0,0 @@ -*.py[cod] -*.sw[op] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -cover -.tox -.testrepository -nosetests.xml - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject -*.bundle -Gemfile.lock - -# Mr Mac User -.DS_Store -._.DS_Store - -# tarballs -*.tar.gz - -# sdist generated stuff -AUTHORS -ChangeLog - -instack.answers - -# Files created by releasenotes build -releasenotes/build diff --git a/.gitreview b/.gitreview index 6dcc1ac15..ef1bce937 100644 --- a/.gitreview +++ b/.gitreview @@ -3,3 +3,4 @@ host=review.openstack.org port=29418 project=openstack/instack-undercloud defaultbranch=master + diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 8c84c21cc..000000000 --- a/.testr.conf +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./instack_undercloud ./instack_undercloud $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 114397c50..000000000 --- a/Gemfile +++ /dev/null @@ -1,22 +0,0 @@ -source ENV['GEM_SOURCE'] || "https://rubygems.org" - -group :development, :test, :system_tests do - gem 'puppet-openstack_spec_helper', - :git => 'https://git.openstack.org/openstack/puppet-openstack_spec_helper', - :branch => 'master', - :require => false -end - -if facterversion = ENV['FACTER_GEM_VERSION'] - gem 'facter', facterversion, :require => false -else - gem 'facter', :require => false -end - -if puppetversion = ENV['PUPPET_GEM_VERSION'] - gem 'puppet', puppetversion, :require => false -else - gem 'puppet', :require => false -end - -# vim:ft=ruby diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db85882..000000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/README.md b/README.md deleted file mode 100644 index c27cc9c8a..000000000 --- a/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Team and repository tags -======================== - -[![Team and repository tags](https://governance.openstack.org/tc/badges/instack-undercloud.svg)](https://governance.openstack.org/tc/reference/tags/index.html) - - - -Undercloud Install via instack -============================== - -instack-undercloud is tooling for installing a TripleO undercloud. - -It is part of the TripleO project: -https://docs.openstack.org/tripleo-docs/latest/ - -* Free software: Apache license -* Source: https://git.openstack.org/cgit/openstack/instack-undercloud -* Bugs: https://bugs.launchpad.net/tripleo diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..b78599a92 --- /dev/null +++ b/README.rst @@ -0,0 +1,5 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git source code management system. To see the contents of this repository before it reached its end of life, please check out the previous commit with "git checkout HEAD^1". + +For any further questions, please email openstack-dev@lists.openstack.org or join #openstack-dev on Freenode. 
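The new README.rst above tells readers to run "git checkout HEAD^1" to view the pre-retirement contents, and the deleted README.md gives the repository URL. A minimal recovery sketch, assuming a fresh clone (and noting that OpenStack repositories have since migrated from git.openstack.org to opendev.org, so the old URL should redirect):

    # Clone the retired repository; the URL comes from the deleted README.md
    # and should now redirect to https://opendev.org/openstack/instack-undercloud.
    git clone https://git.openstack.org/openstack/instack-undercloud
    cd instack-undercloud

    # HEAD is the retirement commit itself, so its first parent (HEAD^1) is
    # the last commit that still contained the full project tree.
    git checkout HEAD^1
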
diff --git a/Rakefile b/Rakefile deleted file mode 100644 index 479c55f50..000000000 --- a/Rakefile +++ /dev/null @@ -1,6 +0,0 @@ -require 'puppetlabs_spec_helper/rake_tasks' -require 'puppet-lint/tasks/puppet-lint' - -PuppetLint.configuration.fail_on_warnings = true -PuppetLint.configuration.send('disable_80chars') -PuppetSyntax.fail_on_deprecation_notices = false diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 81d75589e..000000000 --- a/bindep.txt +++ /dev/null @@ -1,2 +0,0 @@ -libssl-dev [platform:dpkg test] -openssl-devel [platform:rpm test] diff --git a/config-generator/undercloud.conf b/config-generator/undercloud.conf deleted file mode 100644 index 47d92c8a6..000000000 --- a/config-generator/undercloud.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -output_file = undercloud.conf.sample -namespace = instack-undercloud diff --git a/doc/source/api/index.rst b/doc/source/api/index.rst deleted file mode 100644 index 25686f80e..000000000 --- a/doc/source/api/index.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - undercloud \ No newline at end of file diff --git a/doc/source/api/undercloud.rst b/doc/source/api/undercloud.rst deleted file mode 100644 index 852737835..000000000 --- a/doc/source/api/undercloud.rst +++ /dev/null @@ -1,8 +0,0 @@ -=================== - :mod:`undercloud` -=================== - -.. automodule:: instack_undercloud.undercloud - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 35a56e712..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,278 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - 'openstackdocstheme', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'Instack Undercloud' -copyright = u'2015, OpenStack Foundation' -bug_tracker = u'Launchpad' -bug_tracker_url = u'https://launchpad.net/tripleo' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '3.0.0' -# The full version, including alpha/beta/rc tags. -release = '3.0.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. 
-#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'instack-underclouddoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'instack-undercloud.tex', u'instack-undercloud Documentation', - u'2015, OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'instack-undercloud', u'instack-undercloud Documentation', - [u'2015, OpenStack Foundation'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'instack-undercloud', u'instack-undercloud Documentation', - u'2015, OpenStack Foundation', 'instack-undercloud', - 'Tooling for installing TripleO undercloud.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. 
-#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - -# openstackdocstheme options -repository_name = 'openstack/instack-undercloud' -bug_project = 'tripleo' -bug_tag = 'documentation' - -rst_prolog = """ -.. |project| replace:: %s -.. |bug_tracker| replace:: %s -.. |bug_tracker_url| replace:: %s -""" % (project, bug_tracker, bug_tracker_url) diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 42d8e0ef5..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -Welcome to |project| documentation -==================================== - -The instack-undercloud project has code and diskimage-builder -elements for deploying a TripleO undercloud to an existing system. - -See the `TripleO documentation`_ for the full end-to-end workflow. - -.. _`TripleO documentation`: https://docs.openstack.org/tripleo-docs/latest/ - -API Documentation -================= - -.. toctree:: - :maxdepth: 1 - - api/index - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/elements/centos-cr/README.rst b/elements/centos-cr/README.rst deleted file mode 100644 index 29aaecc81..000000000 --- a/elements/centos-cr/README.rst +++ /dev/null @@ -1,4 +0,0 @@ -Enable the CentOS CR Repo - -Allow use of packages from the CentOS CR repository, per the instructions at -https://wiki.centos.org/AdditionalResources/Repositories/CR diff --git a/elements/centos-cr/pre-install.d/00-enable-cr-repo b/elements/centos-cr/pre-install.d/00-enable-cr-repo deleted file mode 100755 index 2162e4817..000000000 --- a/elements/centos-cr/pre-install.d/00-enable-cr-repo +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -set -eux -set -o pipefail - -# Per https://seven.centos.org/2015/03/centos-7-cr-repo-has-been-populated/ -# we need to update before we can enable the cr repo. -yum -y update -yum-config-manager --enable cr diff --git a/elements/instack-vm/README.md b/elements/instack-vm/README.md deleted file mode 100644 index 12dd1d20f..000000000 --- a/elements/instack-vm/README.md +++ /dev/null @@ -1,9 +0,0 @@ -Build an instack vm image - -This element allows building an instack vm image using diskimage-builder. To build -the image simply include this element and the appropriate distro element. -For example: - -disk-image-create -a amd64 -o instack \ - --image-size 30 \ - fedora instack-vm \ No newline at end of file diff --git a/elements/instack-vm/element-deps b/elements/instack-vm/element-deps deleted file mode 100644 index af9e8814f..000000000 --- a/elements/instack-vm/element-deps +++ /dev/null @@ -1,3 +0,0 @@ -local-config -package-installs -vm diff --git a/elements/instack-vm/extra-data.d/50-add-instack-files b/elements/instack-vm/extra-data.d/50-add-instack-files deleted file mode 100755 index 593b6fdb1..000000000 --- a/elements/instack-vm/extra-data.d/50-add-instack-files +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -set -eu -set -o pipefail - -ANSWERSFILE=${ANSWERSFILE:-""} - -if [ -z "$ANSWERSFILE" ]; then - echo "\$ANSWERSFILE should be defined." 
- exit 1 -fi - -file_list="$ANSWERSFILE -$TE_DATAFILE" - -for f in $file_list; do - cp "$f" "$TMP_HOOKS_PATH" -done diff --git a/elements/instack-vm/install.d/50-ip-forward b/elements/instack-vm/install.d/50-ip-forward deleted file mode 100755 index dfe2af8cc..000000000 --- a/elements/instack-vm/install.d/50-ip-forward +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -eux -set -o pipefail - -# When using instack-virt-setup, it makes sense to always enable IP forwarding -# so the Overcloud nodes can have external access. -cat > /etc/sysctl.d/ip-forward.conf < /etc/sudoers.d/stack < /etc/hostname -echo "127.0.0.1 $UNDERCLOUD_VM_NAME $UNDERCLOUD_VM_NAME.localdomain" >> /etc/hosts diff --git a/elements/overcloud-full/README.rst b/elements/overcloud-full/README.rst deleted file mode 100644 index 90c912911..000000000 --- a/elements/overcloud-full/README.rst +++ /dev/null @@ -1,13 +0,0 @@ -overcloud-full -============== - -Element for the overcloud-full image created by instack-undercloud. - -Workarounds ------------ - -This element can be used to apply needed workarounds. - -* openstack-glance-api and openstack-glance-registry are currently installed - explicitly since this is not handled by the overcloud-control element from - tripleo-puppet-elements diff --git a/elements/overcloud-full/install.d/50-persistent-journal b/elements/overcloud-full/install.d/50-persistent-journal deleted file mode 100755 index 2721c7b2c..000000000 --- a/elements/overcloud-full/install.d/50-persistent-journal +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -eu -set -o pipefail - -# Enable persistent logging for the systemd journal -mkdir -p /var/log/journal diff --git a/elements/overcloud-full/package-installs.yaml b/elements/overcloud-full/package-installs.yaml deleted file mode 100644 index a533d5319..000000000 --- a/elements/overcloud-full/package-installs.yaml +++ /dev/null @@ -1,2 +0,0 @@ -openstack-glance-api: -openstack-glance-registry: diff --git a/elements/overcloud-full/post-install.d/50-remove-libvirt-default-net b/elements/overcloud-full/post-install.d/50-remove-libvirt-default-net deleted file mode 100755 index 9b13648f5..000000000 --- a/elements/overcloud-full/post-install.d/50-remove-libvirt-default-net +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -eux -set -o pipefail - -rm -f /etc/libvirt/qemu/networks/autostart/default.xml diff --git a/elements/pip-and-virtualenv-override/README.md b/elements/pip-and-virtualenv-override/README.md deleted file mode 100644 index de8ffd5a9..000000000 --- a/elements/pip-and-virtualenv-override/README.md +++ /dev/null @@ -1,4 +0,0 @@ -This element will override the behavior from the pip-and-virtualenv element -from tripleo-image-elements so that python-pip and python-virtualenv are never -installed. - diff --git a/elements/pip-and-virtualenv-override/element-provides b/elements/pip-and-virtualenv-override/element-provides deleted file mode 100644 index 7c8922a0b..000000000 --- a/elements/pip-and-virtualenv-override/element-provides +++ /dev/null @@ -1 +0,0 @@ -pip-and-virtualenv diff --git a/elements/puppet-stack-config/README.rst b/elements/puppet-stack-config/README.rst deleted file mode 100644 index 831caed8c..000000000 --- a/elements/puppet-stack-config/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -puppet-stack-config -------------------- - -puppet-stack-config provides static puppet configuration for a single node -baremetal cloud using the Ironic driver. A yaml template is used to render a -hiera data file at /etc/puppet/hieradata/puppet-stack-config.yaml. 
- -The template rendering takes its input from a set of defined environment -variables. diff --git a/elements/puppet-stack-config/element-deps b/elements/puppet-stack-config/element-deps deleted file mode 100644 index 240054ddc..000000000 --- a/elements/puppet-stack-config/element-deps +++ /dev/null @@ -1,2 +0,0 @@ -hiera -puppet-modules diff --git a/elements/puppet-stack-config/extra-data.d/10-install-git b/elements/puppet-stack-config/extra-data.d/10-install-git deleted file mode 100755 index 080192a89..000000000 --- a/elements/puppet-stack-config/extra-data.d/10-install-git +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -eux - -yum -y install git diff --git a/elements/puppet-stack-config/install.d/02-puppet-stack-config b/elements/puppet-stack-config/install.d/02-puppet-stack-config deleted file mode 100755 index be71c5d57..000000000 --- a/elements/puppet-stack-config/install.d/02-puppet-stack-config +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import subprocess -import tempfile - -import pystache - -from instack_undercloud import undercloud - -renderer = pystache.Renderer(escape=lambda s: s) -template = os.path.join(os.path.dirname(__file__), - '..', - 'puppet-stack-config.yaml.template') - -context = {item: os.environ.get(item) - for item in undercloud.InstackEnvironment.PUPPET_KEYS} - -endpoint_context = {} -for k, v in os.environ.items(): - if k.startswith('UNDERCLOUD_ENDPOINT_'): - endpoint_context[k] = v -context.update(endpoint_context) - -# Make sure boolean strings are treated as Bool() -for k, v in list(context.items()): - if v == 'False': - context[k] = False - elif v == 'True': - context[k] = True - -with open(template) as f: - puppet_stack_config_yaml = renderer.render(f.read(), context) - -puppet_stack_config_yaml_path = '/etc/puppet/hieradata/puppet-stack-config.yaml' - -if not os.path.exists(os.path.dirname(puppet_stack_config_yaml_path)): - os.makedirs(os.path.dirname(puppet_stack_config_yaml_path)) -with open(puppet_stack_config_yaml_path, 'w') as f: - f.write(puppet_stack_config_yaml) - -# Secure permissions -os.chmod(os.path.dirname(puppet_stack_config_yaml_path), 0750) -os.chmod(puppet_stack_config_yaml_path, 0600) diff --git a/elements/puppet-stack-config/install.d/10-puppet-stack-config-puppet-module b/elements/puppet-stack-config/install.d/10-puppet-stack-config-puppet-module deleted file mode 100755 index 33993fbff..000000000 --- a/elements/puppet-stack-config/install.d/10-puppet-stack-config-puppet-module +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -eux -set -o pipefail - -mkdir -p /etc/puppet/manifests -cp $(dirname $0)/../puppet-stack-config.pp /etc/puppet/manifests/puppet-stack-config.pp diff --git a/elements/puppet-stack-config/os-apply-config/etc/puppet/hieradata/CentOS.yaml b/elements/puppet-stack-config/os-apply-config/etc/puppet/hieradata/CentOS.yaml deleted file mode 100644 index f7298301d..000000000 --- 
a/elements/puppet-stack-config/os-apply-config/etc/puppet/hieradata/CentOS.yaml +++ /dev/null @@ -1 +0,0 @@ -tripleo::selinux::mode: permissive diff --git a/elements/puppet-stack-config/os-apply-config/etc/puppet/hieradata/RedHat.yaml b/elements/puppet-stack-config/os-apply-config/etc/puppet/hieradata/RedHat.yaml deleted file mode 100644 index 1c9d8d6eb..000000000 --- a/elements/puppet-stack-config/os-apply-config/etc/puppet/hieradata/RedHat.yaml +++ /dev/null @@ -1,22 +0,0 @@ -rabbitmq::package_provider: yum -tripleo::selinux::mode: enforcing -tripleo::profile::base::sshd::options: - HostKey: - - '/etc/ssh/ssh_host_rsa_key' - - '/etc/ssh/ssh_host_ecdsa_key' - - '/etc/ssh/ssh_host_ed25519_key' - SyslogFacility: 'AUTHPRIV' - AuthorizedKeysFile: '.ssh/authorized_keys' - ChallengeResponseAuthentication: 'no' - GSSAPIAuthentication: 'yes' - GSSAPICleanupCredentials: 'no' - UsePAM: 'yes' - UseDNS: 'no' - X11Forwarding: 'yes' - UsePrivilegeSeparation: 'sandbox' - AcceptEnv: - - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES' - - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT' - - 'LC_IDENTIFICATION LC_ALL LANGUAGE' - - 'XMODIFIERS' - Subsystem: 'sftp /usr/libexec/openssh/sftp-server' diff --git a/elements/puppet-stack-config/os-refresh-config/configure.d/50-puppet-stack-config b/elements/puppet-stack-config/os-refresh-config/configure.d/50-puppet-stack-config deleted file mode 100755 index 6433f8ffd..000000000 --- a/elements/puppet-stack-config/os-refresh-config/configure.d/50-puppet-stack-config +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -eux -set -o pipefail - -function puppet_apply { - set +e - $@ 2>&1 - rc=$? - set -e - - echo "puppet apply exited with exit code $rc" - - if [ $rc != 2 -a $rc != 0 ]; then - exit $rc - fi -} - -puppet_apply puppet apply --summarize --detailed-exitcodes /etc/puppet/manifests/puppet-stack-config.pp diff --git a/elements/puppet-stack-config/os-refresh-config/post-configure.d/10-iptables b/elements/puppet-stack-config/os-refresh-config/post-configure.d/10-iptables deleted file mode 100755 index e5e9c297e..000000000 --- a/elements/puppet-stack-config/os-refresh-config/post-configure.d/10-iptables +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -eux -set -o pipefail - -EXTERNAL_BRIDGE=br-ctlplane -iptables -w -t nat -C PREROUTING -d 169.254.169.254/32 -i $EXTERNAL_BRIDGE -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8775 || iptables -w -t nat -I PREROUTING -d 169.254.169.254/32 -i $EXTERNAL_BRIDGE -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8775 diff --git a/elements/puppet-stack-config/package-installs.yaml b/elements/puppet-stack-config/package-installs.yaml deleted file mode 100644 index 3e405aba0..000000000 --- a/elements/puppet-stack-config/package-installs.yaml +++ /dev/null @@ -1,2 +0,0 @@ -pystache: -python-oslo-concurrency: diff --git a/elements/puppet-stack-config/puppet-stack-config.pp b/elements/puppet-stack-config/puppet-stack-config.pp deleted file mode 100644 index 46f1c2e97..000000000 --- a/elements/puppet-stack-config/puppet-stack-config.pp +++ /dev/null @@ -1,729 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -warning('instack-undercloud is deprecated in Rocky and is replaced by containerized-undercloud.') - -# Deploy os-net-config before everything in the catalog -include ::stdlib -class { '::tripleo::network::os_net_config': - stage => 'setup', -} - -# enable ip forwarding for the overcloud nodes to access the outside internet -# in cases where they are on an isolated network -ensure_resource('sysctl::value', 'net.ipv4.ip_forward', { 'value' => 1 }) -# NOTE(aschultz): clear up old file as this used to be managed via DIB -file { '/etc/sysctl.d/ip-forward.conf': - ensure => absent -} -# NOTE(aschultz): LP#1750194 - docker will switch FORWARD to DROP if ip_forward -# is not enabled first. -Sysctl::Value['net.ipv4.ip_forward'] -> Package<| title == 'docker' |> - -# NOTE(aschultz): LP#1754426 - remove cloud-init and disable os-collect-config -package { 'cloud-init': - ensure => 'absent', -} -service { 'os-collect-config': - ensure => stopped, - enable => false, -} - -# Run OpenStack db-sync at every puppet run, in any case. -Exec<| title == 'neutron-db-sync' |> { refreshonly => false } -Exec<| title == 'keystone-manage db_sync' |> { refreshonly => false } -Exec<| title == 'glance-manage db_sync' |> { refreshonly => false } -Exec<| title == 'nova-db-sync-api' |> { refreshonly => false } -Exec<| title == 'nova-db-sync' |> { refreshonly => false } -Exec<| title == 'nova-db-online-data-migrations' |> { refreshonly => false } -Exec<| title == 'ironic-db-online-data-migrations' |> { refreshonly => false } -Exec<| title == 'heat-dbsync' |> { - refreshonly => false, - # Heat database on the undercloud can be really big, db-sync take usually at least 10 min. - timeout => 900, -} -Exec<| title == 'aodh-db-sync' |> { refreshonly => false } -Exec<| title == 'ironic-dbsync' |> { refreshonly => false } -Exec<| title == 'mistral-db-sync' |> { refreshonly => false } -Exec<| title == 'mistral-db-populate' |> { refreshonly => false } -Exec<| title == 'zaqar-manage db_sync' |> { refreshonly => false } -Exec<| title == 'cinder-manage db_sync' |> { refreshonly => false } - -Keystone::Resource::Service_identity { - default_domain => hiera('keystone_default_domain'), -} - -include ::tripleo::profile::base::time::ntp - -include ::rabbitmq -Class['::rabbitmq'] -> Service['httpd'] - -include ::tripleo::firewall -include ::tripleo::selinux -include ::tripleo::profile::base::kernel -include ::tripleo::profile::base::certmonger_user - -if hiera('tripleo::haproxy::service_certificate', undef) { - class {'::tripleo::profile::base::haproxy': - enable_load_balancer => true, - } - include ::tripleo::keepalived - # NOTE: The following is required because we need to make sure that keepalived - # is up and running before rabbitmq. The reason is that when the undercloud is - # with ssl the hostname is configured to one of the VIPs so rabbit will try to - # connect to it at startup and if the VIP is not up it will fail (LP#1782814) - Class['::tripleo::keepalived'] -> Class['::rabbitmq'] - - # NOTE: This is required because the haproxy configuration should be changed - # before any keystone operations are triggered. 
Without this, it will try to - # access the new endpoints that point to haproxy even if haproxy hasn't - # started yet. The same is the case for ironic and ironic-inspector. - Class['::tripleo::haproxy'] -> Anchor['keystone::install::begin'] -} - -# MySQL -include ::tripleo::profile::base::database::mysql -# Raise the mysql file limit -exec { 'systemctl-daemon-reload': - command => '/bin/systemctl daemon-reload', - refreshonly => true, -} -file { '/etc/systemd/system/mariadb.service.d': - ensure => 'directory', - owner => 'root', - group => 'root', - mode => '0755', -} -file { '/etc/systemd/system/mariadb.service.d/limits.conf': - ensure => 'file', - owner => 'root', - group => 'root', - mode => '0644', - content => "[Service]\nLimitNOFILE=16384\n", - require => File['/etc/systemd/system/mariadb.service.d'], - notify => [Exec['systemctl-daemon-reload'], Service['mysqld']], -} -Exec['systemctl-daemon-reload'] -> Service['mysqld'] - -file { '/var/log/journal': - ensure => 'directory', - owner => 'root', - group => 'root', - mode => '0755', - notify => Service['systemd-journald'], -} -service { 'systemd-journald': - ensure => 'running' -} - -# FIXME: this should only occur on the bootstrap host (ditto for db syncs) -# Create all the database schemas -# Example DSN format: mysql+pymysql://user:password@host/dbname -$allowed_hosts = ['%',hiera('controller_host')] -$re_dsn = '//([^:]+):([^@]+)@\[?([^/]+?)\]?/([a-z_-]+)' -$keystone_dsn = match(hiera('keystone::database_connection'), $re_dsn) -class { '::keystone::db::mysql': - user => $keystone_dsn[1], - password => $keystone_dsn[2], - host => $keystone_dsn[3], - dbname => $keystone_dsn[4], - allowed_hosts => $allowed_hosts, -} -$glance_dsn = match(hiera('glance::api::database_connection'), $re_dsn) -class { '::glance::db::mysql': - user => $glance_dsn[1], - password => $glance_dsn[2], - host => $glance_dsn[3], - dbname => $glance_dsn[4], - allowed_hosts => $allowed_hosts, -} -$nova_dsn = match(hiera('nova::database_connection'), $re_dsn) -class { '::nova::db::mysql': - user => $nova_dsn[1], - password => $nova_dsn[2], - host => $nova_dsn[3], - dbname => $nova_dsn[4], - allowed_hosts => $allowed_hosts, -} -$nova_api_dsn = match(hiera('nova::api_database_connection'), $re_dsn) -class { '::nova::db::mysql_api': - user => $nova_api_dsn[1], - password => $nova_api_dsn[2], - host => $nova_api_dsn[3], - dbname => $nova_api_dsn[4], - allowed_hosts => $allowed_hosts, -} -$nova_placement_dsn = match(hiera('nova::placement_database_connection'), $re_dsn) -class { '::nova::db::mysql_placement': - user => $nova_placement_dsn[1], - password => $nova_placement_dsn[2], - host => $nova_placement_dsn[3], - dbname => $nova_placement_dsn[4], - allowed_hosts => $allowed_hosts, -} -$neutron_dsn = match(hiera('neutron::server::database_connection'), $re_dsn) -class { '::neutron::db::mysql': - user => $neutron_dsn[1], - password => $neutron_dsn[2], - host => $neutron_dsn[3], - dbname => $neutron_dsn[4], - allowed_hosts => $allowed_hosts, -} -$heat_dsn = match(hiera('heat_dsn'), $re_dsn) -class { '::heat::db::mysql': - user => $heat_dsn[1], - password => $heat_dsn[2], - host => $heat_dsn[3], - dbname => $heat_dsn[4], - allowed_hosts => $allowed_hosts, -} -if str2bool(hiera('enable_telemetry', false)) { - - # Ceilometer - - include ::ceilometer::keystone::auth - include ::aodh::keystone::auth - include ::ceilometer - include ::ceilometer::agent::notification - include ::ceilometer::agent::central - include ::ceilometer::agent::auth - include 
::ceilometer::dispatcher::gnocchi - - # We need to use exec as the keystone dependency wouldnt allow - # us to wait until service is up before running upgrade. This - # is because both keystone, gnocchi and ceilometer run under apache. - exec { 'ceilo-gnocchi-upgrade': - command => 'ceilometer-upgrade --skip-metering-database', - path => ['/usr/bin', '/usr/sbin'], - } - - # This ensures we can do service validation on gnocchi api before - # running ceilometer-upgrade - $command = join(['curl -s', - hiera('gnocchi_healthcheck_url')], ' ') - - openstacklib::service_validation { 'gnocchi-status': - command => $command, - tries => 20, - refreshonly => true, - subscribe => Anchor['gnocchi::service::end'] - } - -# Ensure all endpoint exists and only then run the upgrade. - Keystone::Resource::Service_identity<||> - -> Openstacklib::Service_validation['gnocchi-status'] - -> Exec['ceilo-gnocchi-upgrade'] - - # Aodh - $aodh_dsn = match(hiera('aodh::db::database_connection'), $re_dsn) - class { '::aodh::db::mysql': - user => $aodh_dsn[1], - password => $aodh_dsn[2], - host => $aodh_dsn[3], - dbname => $aodh_dsn[4], - allowed_hosts => $allowed_hosts, - } - include ::aodh - include ::aodh::api - include ::aodh::wsgi::apache - include ::aodh::evaluator - include ::aodh::notifier - include ::aodh::listener - include ::aodh::client - include ::aodh::db::sync - include ::aodh::auth - include ::aodh::config - - # Gnocchi - $gnocchi_dsn = match(hiera('gnocchi::db::database_connection'), $re_dsn) - class { '::gnocchi::db::mysql': - user => $gnocchi_dsn[1], - password => $gnocchi_dsn[2], - host => $gnocchi_dsn[3], - dbname => $gnocchi_dsn[4], - allowed_hosts => $allowed_hosts, - } - include ::gnocchi - include ::gnocchi::keystone::auth - include ::gnocchi::api - include ::gnocchi::wsgi::apache - include ::gnocchi::client - include ::gnocchi::db::sync - include ::gnocchi::storage - include ::gnocchi::metricd - include ::gnocchi::statsd - include ::gnocchi::config - $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift')) - case $gnocchi_backend { - 'swift': { include ::gnocchi::storage::swift } - 'file': { include ::gnocchi::storage::file } - 'rbd': { include ::gnocchi::storage::ceph } - default: { fail('Unrecognized gnocchi_backend parameter.') } - } - - # Panko - $panko_dsn = match(hiera('panko::db::database_connection'), $re_dsn) - class { '::panko::db::mysql': - user => $panko_dsn[1], - password => $panko_dsn[2], - host => $panko_dsn[3], - dbname => $panko_dsn[4], - allowed_hosts => $allowed_hosts, - } - include ::panko - include ::panko::keystone::auth - include ::panko::config - include ::panko::db - include ::panko::db::sync - include ::panko::api - include ::panko::wsgi::apache - include ::panko::client -} else { - # If Telemetry is disabled, ensure we tear down everything: - # packages, services, configuration files. 
- Package { [ - 'python-aodh', - 'python-ceilometer', - 'python-gnocchi', - 'python-panko' - ]: - ensure => 'purged', - notify => Service['httpd'], - } - File { [ - '/etc/httpd/conf.d/10-aodh_wsgi.conf', - '/etc/httpd/conf.d/10-ceilometer_wsgi.conf', - '/etc/httpd/conf.d/10-gnocchi_wsgi.conf', - '/etc/httpd/conf.d/10-panko_wsgi.conf', - ]: - ensure => absent, - notify => Service['httpd'], - } -} - -$ironic_dsn = match(hiera('ironic::database_connection'), $re_dsn) -class { '::ironic::db::mysql': - user => $ironic_dsn[1], - password => $ironic_dsn[2], - host => $ironic_dsn[3], - dbname => $ironic_dsn[4], - allowed_hosts => $allowed_hosts, -} - -$ironic_inspector_dsn = match(hiera('ironic::inspector::db::database_connection'), $re_dsn) -class { '::ironic::inspector::db::mysql': - user => $ironic_inspector_dsn[1], - password => $ironic_inspector_dsn[2], - host => $ironic_inspector_dsn[3], - dbname => $ironic_inspector_dsn[4], - allowed_hosts => $allowed_hosts, -} - -# pre-install swift here so we can build rings -include ::swift - -if hiera('tripleo::haproxy::service_certificate', undef) { - $keystone_public_endpoint = join(['https://', hiera('controller_public_host'), ':13000']) - $enable_proxy_headers_parsing = true -} else { - $keystone_public_endpoint = undef - $enable_proxy_headers_parsing = false -} - -if str2bool(hiera('enable_telemetry', false)) { - $notification_topics = ['notifications'] -} else { - $notification_topics = [] -} - -class { '::keystone': - enable_proxy_headers_parsing => $enable_proxy_headers_parsing, - notification_topics => $notification_topics, -} -include ::keystone::wsgi::apache -include ::keystone::cron::token_flush -include ::keystone::roles::admin -include ::keystone::endpoint -include ::keystone::cors -include ::keystone::config - -include ::heat::keystone::auth -include ::heat::keystone::auth_cfn -include ::neutron::keystone::auth -include ::glance::keystone::auth -include ::nova::keystone::auth -include ::nova::keystone::auth_placement -include ::swift::keystone::auth -include ::ironic::keystone::auth -include ::ironic::keystone::auth_inspector - -#TODO: need a cleanup-keystone-tokens.sh solution here -keystone_config { - 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; -} - -# TODO: notifications, scrubber, etc. 
-class { '::glance::api': - enable_proxy_headers_parsing => $enable_proxy_headers_parsing, -} -include ::glance::backend::swift -include ::glance::notify::rabbitmq - -class { '::nova': - debug => hiera('debug'), - notification_format => 'unversioned', -} - -class { '::nova::api': - enable_proxy_headers_parsing => $enable_proxy_headers_parsing, -} -include ::nova::wsgi::apache_api -include ::nova::cell_v2::simple_setup -include ::nova::placement -include ::nova::wsgi::apache_placement -include ::nova::cron::archive_deleted_rows -include ::nova::cron::purge_shadow_tables -include ::nova::config -include ::nova::conductor -include ::nova::scheduler -include ::nova::scheduler::filter -include ::nova::compute - -class { '::neutron': - debug => hiera('debug'), -} - -include ::neutron::server -include ::neutron::server::notifications -include ::neutron::quota -include ::neutron::plugins::ml2 -include ::neutron::agents::dhcp -include ::neutron::agents::l3 -include ::neutron::plugins::ml2::networking_baremetal -include ::neutron::agents::ml2::networking_baremetal -include ::neutron::config - -# Make sure ironic endpoint exists before starting the service -Keystone_endpoint <||> -> Service['ironic-neutron-agent'] - -class { '::neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -} - -neutron_config { - 'DEFAULT/notification_driver': value => 'messaging'; -} - -# swift proxy -include ::memcached -include ::swift::proxy -include ::swift::ringbuilder -include ::swift::proxy::proxy_logging -include ::swift::proxy::healthcheck -include ::swift::proxy::bulk -include ::swift::proxy::cache -include ::swift::proxy::keystone -include ::swift::proxy::authtoken -include ::swift::proxy::staticweb -include ::swift::proxy::copy -include ::swift::proxy::slo -include ::swift::proxy::dlo -include ::swift::proxy::versioned_writes -include ::swift::proxy::ratelimit -include ::swift::proxy::catch_errors -include ::swift::proxy::tempurl -include ::swift::proxy::formpost -include ::swift::objectexpirer -include ::swift::config - -# swift storage -class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), - allow_versions => true, -} -if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['swift'], - } -} -# This is no longer automatically created by Swift itself -file { '/srv/node/1': - ensure => directory, - owner => 'swift', - group => 'swift', - require => File['/srv/node'], -} -$swift_components = ['account', 'container', 'object'] -swift::storage::filter::recon { $swift_components : } -swift::storage::filter::healthcheck { $swift_components : } - -$controller_host = hiera('controller_host_wrapped') -ring_object_device { "${controller_host}:6000/1": - zone => 1, - weight => 1, -} -Ring_object_device<||> ~> Service['swift-proxy-server'] -ring_container_device { "${controller_host}:6001/1": - zone => 1, - weight => 1, -} -Ring_container_device<||> ~> Service['swift-proxy-server'] -ring_account_device { "${controller_host}:6002/1": - zone => 1, - weight => 1, -} -Ring_account_device<||> ~> Service['swift-proxy-server'] - -# Ensure rsyslog catches up change in /etc/rsyslog.d and forwards logs -exec { 'restart rsyslog': - command => '/bin/systemctl restart rsyslog', -} - -# Apache -include ::apache - -# Heat -class { '::heat': - debug => hiera('debug'), - keystone_ec2_uri => join([hiera('keystone_auth_uri'), '/ec2tokens']), - enable_proxy_headers_parsing => 
$enable_proxy_headers_parsing, - heat_clients_endpoint_type => hiera('heat_clients_endpoint_type', 'internal'), -} -include ::heat::api -include ::heat::wsgi::apache_api -include ::heat::api_cfn -include ::heat::wsgi::apache_api_cfn -include ::heat::engine -include ::heat::keystone::domain -include ::heat::cron::purge_deleted -include ::heat::cors -include ::heat::config - -include ::keystone::roles::admin - -include ::nova::compute::ironic -include ::nova::network::neutron -include ::nova::cors - -# Ironic - -include ::ironic -include ::ironic::api -include ::ironic::wsgi::apache -include ::ironic::conductor -include ::ironic::drivers::ansible -include ::ironic::drivers::drac -include ::ironic::drivers::ilo -include ::ironic::drivers::inspector -include ::ironic::drivers::interfaces -include ::ironic::drivers::ipmi -include ::ironic::drivers::pxe -include ::ironic::drivers::redfish -include ::ironic::drivers::staging -include ::ironic::glance -include ::ironic::inspector -include ::ironic::inspector::cors -include ::ironic::inspector::pxe_filter -include ::ironic::inspector::pxe_filter::dnsmasq -include ::ironic::neutron -include ::ironic::pxe -include ::ironic::service_catalog -include ::ironic::swift -include ::ironic::cors -include ::ironic::config - -Keystone_endpoint<||> -> Service['ironic-inspector'] - -# https://bugs.launchpad.net/tripleo/+bug/1663273 -Keystone_endpoint <||> -> Service['nova-compute'] -Keystone_service <||> -> Service['nova-compute'] - -# This is a workaround for a race between nova-compute and ironic -# conductor. When https://bugs.launchpad.net/tripleo/+bug/1777608 is -# fixed this can be removed. Currently we wait 1 minutes for the -# ironic conductor service to be ready. As puppet can order thing its -# own way and be slow (especially in CI env) we can have services -# started at more than one minute appart, hence the need for it. -Service[$::ironic::params::conductor_service] -> Service[$::nova::params::compute_service_name] - -if str2bool(hiera('enable_tempest', true)) { - # tempest - package{'openstack-tempest': } - # needed for /bin/subunit-2to1 (called by run_tempest.sh) - package{'subunit-filters': } -} - -# Ensure dm thin-pool is never activated. This avoids an issue -# where the instack host (in this case on a VM) was crashing due to -# activation of the docker thin-pool associated with the atomic host. 
-augeas { 'lvm.conf': - require => Package['nova-compute'], - context => '/files/etc/lvm/lvm.conf/devices/dict/', - changes => 'set global_filter/list/1/str "r|^/dev/disk/by-path/ip.*iscsi.*\.org\.openstack:.*|"' -} - -if str2bool(hiera('enable_docker_registry', true)) { - ensure_resource('group', 'docker', { - 'ensure' => 'present', - }) - ensure_resource('user', 'docker_user', { - 'name' => hiera('tripleo_install_user'), - 'groups' => 'docker', - 'notify' => Service['docker'], - }) - include ::tripleo::profile::base::docker_registry -} - -include ::mistral -$mistral_dsn = match(hiera('mistral::database_connection'), $re_dsn) -class { '::mistral::db::mysql': - user => $mistral_dsn[1], - password => $mistral_dsn[2], - host => $mistral_dsn[3], - dbname => $mistral_dsn[4], - allowed_hosts => $allowed_hosts, -} -include ::mistral::keystone::auth -include ::mistral::db::sync -include ::mistral::api -include ::mistral::engine -ensure_resource('user', 'mistral', { - 'name' => 'mistral', - 'groups' => 'docker', -}) -include ::mistral::executor -include ::mistral::cors -include ::mistral::cron_trigger -include ::mistral::config - -# ensure TripleO common entrypoints for custom Mistral actions -# are installed before performing the Mistral action population -package {'openstack-tripleo-common': } -Package['openstack-tripleo-common'] ~> Exec['mistral-db-populate'] -# If ironic inspector is not running, mistral-db-populate will have invalid -# actions for it. -Class['::ironic::inspector'] ~> Exec['mistral-db-populate'] -# db-populate calls inspectorclient, which will use the keystone endpoint to -# check inspector's version. So that's needed before db-populate is executed. -Class['::ironic::keystone::auth_inspector'] ~> Exec['mistral-db-populate'] - -if str2bool(hiera('enable_ui', true)) { - include ::tripleo::ui -} - -if str2bool(hiera('enable_validations', true)) { - include ::tripleo::profile::base::validations -} - -include ::zaqar -$zaqar_dsn = match(hiera('zaqar::management::sqlalchemy::uri'), $re_dsn) -class { '::zaqar::db::mysql': - user => $zaqar_dsn[1], - password => $zaqar_dsn[2], - host => $zaqar_dsn[3], - dbname => $zaqar_dsn[4], - allowed_hosts => $allowed_hosts, -} -include ::zaqar::db::sync -include ::zaqar::management::sqlalchemy -include ::zaqar::messaging::swift -include ::zaqar::keystone::auth -include ::zaqar::keystone::auth_websocket -include ::zaqar::transport::websocket -include ::zaqar::transport::wsgi - -include ::zaqar::server -include ::zaqar::wsgi::apache -include ::zaqar::config - -zaqar::server_instance{ '1': - transport => 'websocket' -} - -if str2bool(hiera('enable_cinder', true)) { - $cinder_dsn = match(hiera('cinder::database_connection'), $re_dsn) - class { '::cinder::db::mysql': - user => $cinder_dsn[1], - password => $cinder_dsn[2], - host => $cinder_dsn[3], - dbname => $cinder_dsn[4], - allowed_hosts => $allowed_hosts, - } - include ::cinder::keystone::auth - - include ::cinder - include ::cinder::api - include ::cinder::cron::db_purge - include ::cinder::config - include ::cinder::glance - include ::cinder::scheduler - include ::cinder::volume - include ::cinder::wsgi::apache - - $cinder_backend_name = hiera('cinder_backend_name') - cinder::backend::iscsi { $cinder_backend_name: - iscsi_ip_address => hiera('cinder_iscsi_address'), - iscsi_helper => 'lioadm', - iscsi_protocol => 'iscsi' - } - - include ::cinder::backends - - if str2bool(hiera('cinder_enable_test_volume', false)) { - include ::cinder::setup_test_volume - } -} - -# firewalld is a dependency of 
some anaconda packages, so we need to use purge -# to ensure all the things that it might be a dependency for are also -# removed. See LP#1669915 -ensure_resource('package', 'firewalld', { -  'ensure' => 'purged', -}) -ensure_resource('package', 'openstack-selinux') -ensure_resource('package', 'parted') -ensure_resource('package', 'psmisc') - -include ::tripleo::profile::base::sshd - -# Swift uses only a single replica on the undercloud. Therefore recovering -# from a corrupted or lost object is not possible, and running replicators and -# auditors only wastes resources. -$needless_services = [ -  'swift-account-auditor', -  'swift-account-replicator', -  'swift-container-auditor', -  'swift-container-replicator', -  'swift-object-auditor', -  'swift-object-replicator'] - -Service[$needless_services] { -  enable => false, -  ensure => stopped, -} - -# novajoin install -if str2bool(hiera('enable_novajoin', false)) { -  include ::nova::metadata::novajoin::auth -  include ::nova::metadata::novajoin::api -} - -# Any special handling that needs to be done during the upgrade. -if str2bool($::undercloud_upgrade) { -  # Noop -} diff --git a/elements/puppet-stack-config/puppet-stack-config.yaml.template b/elements/puppet-stack-config/puppet-stack-config.yaml.template deleted file mode 100644 index 756dfac1f..000000000 --- a/elements/puppet-stack-config/puppet-stack-config.yaml.template +++ /dev/null @@ -1,1049 +0,0 @@ -keystone_identity_uri: {{UNDERCLOUD_ENDPOINT_KEYSTONE_ADMIN}} -keystone_auth_uri: {{UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC}}/v3 -keystone_region: 'regionOne' -keystone_default_domain: 'Default' - -debug: {{UNDERCLOUD_DEBUG}} -controller_host: {{LOCAL_IP}} #local-ipv4 -#local-ipv4 similar to the same hiera key in the overcloud -ctlplane: {{LOCAL_IP}} -controller_host_wrapped: "{{LOCAL_IP_WRAPPED}}" -controller_admin_host: {{UNDERCLOUD_ADMIN_HOST}} -controller_public_host: {{UNDERCLOUD_PUBLIC_HOST}} -{{#UNDERCLOUD_NTP_SERVERS}} -ntp::servers: {{UNDERCLOUD_NTP_SERVERS}} -{{/UNDERCLOUD_NTP_SERVERS}} - -sysctl_settings: {{SYSCTL_SETTINGS}} - -# SSL -tripleo::haproxy::service_certificate: {{UNDERCLOUD_SERVICE_CERTIFICATE}} -generate_service_certificates: {{GENERATE_SERVICE_CERTIFICATE}} -{{#GENERATE_SERVICE_CERTIFICATE}} -tripleo::profile::base::haproxy::certificates_specs: -  undercloud-haproxy-public: -    service_pem: {{UNDERCLOUD_SERVICE_CERTIFICATE}} -    service_certificate: '/etc/pki/tls/certs/undercloud-front.crt' -    service_key: '/etc/pki/tls/private/undercloud-front.key' -    hostname: "%{hiera('controller_public_host')}" -    postsave_cmd: "/usr/bin/instack-haproxy-cert-update '/etc/pki/tls/certs/undercloud-front.crt' '/etc/pki/tls/private/undercloud-front.key' {{UNDERCLOUD_SERVICE_CERTIFICATE}} undercloud-haproxy-public-cert" -    principal: {{SERVICE_PRINCIPAL}} -{{/GENERATE_SERVICE_CERTIFICATE}} - -# CA defaults -certmonger_ca: {{CERTIFICATE_GENERATION_CA}} - -# Common Hiera data gets applied to all nodes -ssh::server::storeconfigs_enabled: false - -# memcached -memcached::max_memory: '50%' -memcached::verbosity: 'v' -memcached::disable_cachedump: true -memcached::listen_ip: '127.0.0.1' -memcached::udp_port: 0 - -# Apache -apache::server_signature: 'Off' -apache::server_tokens: 'Prod' - -# ceilometer settings used by compute and controller ceilo auth settings -ceilometer::agent::auth::auth_region: "%{hiera('keystone_region')}" -aodh::auth::auth_region: "%{hiera('keystone_region')}" -ceilometer::agent::auth::auth_tenant_name: 'service' -aodh::auth::auth_tenant_name: 'service'
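Two templating layers are at work in the file above: the mustache-style {{...}} tokens are substituted once when the undercloud is installed, while the "%{hiera('...')}" strings survive into the deployed hieradata and are resolved by Puppet at catalog compile time. As a rough illustration only (this snippet is not part of the deleted tree), the resolved values can be inspected on a deployed undercloud with the same hiera CLI that the tripleo-undercloud-passwords file later in this diff relies on:

    # Inspect what "%{hiera('keystone_region')}" and friends resolve to on a
    # deployed undercloud; the key names come from the template above.
    sudo hiera keystone_region          # expected: regionOne
    sudo hiera controller_public_host   # the substituted {{UNDERCLOUD_PUBLIC_HOST}} value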
-ceilometer::agent::auth::auth_url: {{UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC}} -aodh::auth::auth_url: "%{hiera('keystone_auth_uri')}" - -# Swift -swift::proxy::proxy_local_net_ip: {{LOCAL_IP}} -swift::proxy::authtoken::auth_uri: "%{hiera('keystone_auth_uri')}" -swift::proxy::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -swift::proxy::node_timeout: 60 -swift::proxy::workers: "%{::os_workers}" -swift::proxy::log_facility: LOG_LOCAL2 -swift::storage::all::storage_local_net_ip: {{LOCAL_IP}} -swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' -swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' -swift::swift_hash_path_suffix: {{UNDERCLOUD_SWIFT_HASH_SUFFIX}} -swift::proxy::account_autocreate: true -swift::proxy::authtoken::password: {{UNDERCLOUD_SWIFT_PASSWORD}} -swift::keystone::auth::tenant: 'service' -swift::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_SWIFT_PUBLIC}} -swift::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_SWIFT_INTERNAL}} -swift::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_SWIFT_ADMIN}} -swift::keystone::auth::password: {{UNDERCLOUD_SWIFT_PASSWORD}} -swift::keystone::auth::region: "%{hiera('keystone_region')}" -swift::keystone::auth::configure_s3_endpoint: false -swift::keystone::auth::operator_roles: - - admin - - swiftoperator -swift_mount_check: false -swift::ringbuilder::replicas: 1 -swift::ringbuilder::part_power: 10 -swift::ringbuilder::min_part_hours: 1 - -swift::proxy::pipeline: - - 'catch_errors' - - 'healthcheck' - - 'proxy-logging' - - 'cache' - - 'ratelimit' - - 'bulk' - - 'tempurl' - - 'formpost' - - 'authtoken' - - 'keystone' - - 'staticweb' - - 'copy' - - 'slo' - - 'dlo' - - 'versioned_writes' - - 'proxy-logging' - - 'proxy-server' - -# Glance -glance::api::debug: "%{hiera('debug')}" -glance::api::bind_port: 9292 -glance::api::bind_host: {{LOCAL_IP}} -glance::api::authtoken::auth_uri: "%{hiera('keystone_auth_uri')}" -glance::api::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -glance::api::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -glance::api::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -glance::api::registry_host: {{LOCAL_IP}} -glance::api::authtoken::password: {{UNDERCLOUD_GLANCE_PASSWORD}} -glance::api::workers: "%{::os_workers}" -glance::api::stores: - - glance.store.filesystem.Store - - glance.store.swift.Store -glance::api::default_store: 'glance.store.swift.Store' -glance::api::pipeline: 'keystone' -# used to construct glance_api_servers -glance_log_file: '' -glance::api::database_connection: mysql+pymysql://glance:{{UNDERCLOUD_GLANCE_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/glance -glance::api::enable_v1_api: false -glance::api::enable_v2_api: true -glance::keystone::auth::tenant: 'service' -glance::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_GLANCE_PUBLIC}} -glance::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_GLANCE_INTERNAL}} -glance::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_GLANCE_ADMIN}} -glance::keystone::auth::password: {{UNDERCLOUD_GLANCE_PASSWORD}} -glance::keystone::auth::region: "%{hiera('keystone_region')}" -glance::backend::swift::swift_store_auth_address: "%{hiera('keystone_auth_uri')}" -glance::backend::swift::swift_store_auth_version: 3 -glance::backend::swift::swift_store_user: service:glance -glance::backend::swift::swift_store_key: {{UNDERCLOUD_GLANCE_PASSWORD}} -glance::backend::swift::swift_store_create_container_on_put: true -glance::default_transport_url: 
"rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -glance::registry::debug: "%{hiera('debug')}" - -# Heat -heat::debug: "%{hiera('debug')}" -heat_stack_domain_admin_password: {{UNDERCLOUD_HEAT_STACK_DOMAIN_ADMIN_PASSWORD}} -heat::engine::configure_delegated_roles: false -heat::engine::heat_stack_user_role: 'heat_stack_user' -heat::engine::heat_watch_server_url: http://{{LOCAL_IP_WRAPPED}}:8003 -heat::engine::heat_metadata_server_url: http://{{LOCAL_IP_WRAPPED}}:8000 -heat::engine::heat_waitcondition_server_url: http://{{LOCAL_IP_WRAPPED}}:8000/v1/waitcondition -heat::engine::reauthentication_auth_method: 'trusts' -heat::engine::trusts_delegated_roles: [] -heat::engine::auth_encryption_key: {{UNDERCLOUD_HEAT_ENCRYPTION_KEY}} -heat::engine::max_resources_per_stack: -1 -heat::engine::convergence_engine: true -heat::engine::num_engine_workers: "%{::os_workers_heat_engine}" -heat::engine::max_nested_stack_depth: 7 -heat::instance_user: heat-admin -heat::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -heat::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -heat::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -heat::keystone::authtoken::password: {{UNDERCLOUD_HEAT_PASSWORD}} -heat::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -heat::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -heat::keystone::domain::domain_name: 'heat_stack' -heat::keystone::domain::domain_password: {{UNDERCLOUD_HEAT_STACK_DOMAIN_ADMIN_PASSWORD}} -heat::api::bind_host: {{LOCAL_IP}} -heat::api::workers: "%{::os_workers}" -heat::api::service_name: 'httpd' -heat::api_cfn::bind_host: {{LOCAL_IP}} -heat::api_cfn::workers: "%{::os_workers}" -heat::api_cfn::service_name: 'httpd' -heat::wsgi::apache_api::ssl: false -heat::wsgi::apache_api::bind_host: {{LOCAL_IP}} -heat::wsgi::apache_api::workers: "%{hiera('heat::api::workers')}" -heat::wsgi::apache_api_cfn::ssl: false -heat::wsgi::apache_api_cfn::bind_host: {{LOCAL_IP}} -heat::wsgi::apache_api_cfn::workers: "%{hiera('heat::api_cfn::workers')}" -heat::database_connection: mysql+pymysql://heat:{{UNDERCLOUD_HEAT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/heat -heat_dsn: mysql+pymysql://heat:{{UNDERCLOUD_HEAT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/heat -heat::rpc_response_timeout: 600 -heat::keystone::auth::tenant: 'service' -heat::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_HEAT_PUBLIC}} -heat::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_HEAT_INTERNAL}} -heat::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_HEAT_ADMIN}} -heat::keystone::auth::password: {{UNDERCLOUD_HEAT_PASSWORD}} -heat::keystone::auth::region: "%{hiera('keystone_region')}" -heat::keystone::auth_cfn::tenant: 'service' -heat::keystone::auth_cfn::region: "%{hiera('keystone_region')}" -heat::keystone::auth_cfn::password: {{UNDERCLOUD_HEAT_CFN_PASSWORD}} -heat::keystone::auth_cfn::public_url: {{UNDERCLOUD_ENDPOINT_HEAT_CFN_PUBLIC}} -heat::keystone::auth_cfn::internal_url: {{UNDERCLOUD_ENDPOINT_HEAT_CFN_INTERNAL}} -heat::keystone::auth_cfn::admin_url: {{UNDERCLOUD_ENDPOINT_HEAT_CFN_ADMIN}} -heat::cron::purge_deleted::age: 1 -heat::cron::purge_deleted::age_type: 'days' -heat::cron::purge_deleted::destination: '/dev/null' -heat::notification_driver: 'messaging' -heat::yaql_memory_quota: 100000 -heat::yaql_limit_iterators: 1000 -heat::max_json_body_size: 4194304 - -# Keystone -keystone::debug: 
"%{hiera('debug')}" -keystone::admin_token: {{UNDERCLOUD_ADMIN_TOKEN}} -keystone::admin_password: {{UNDERCLOUD_ADMIN_PASSWORD}} -keystone::admin_workers: "%{::os_workers}" -keystone::public_workers: "%{::os_workers}" -keystone::public_bind_host: {{LOCAL_IP}} -keystone::admin_bind_host: {{LOCAL_IP}} -keystone::public_endpoint: {{UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC}} -keystone::service_name: 'httpd' -keystone_ca_certificate: '{{KEYSTONE_CA_CERTIFICATE}}' -keystone_signing_key: '{{KEYSTONE_SIGNING_KEY}}' -keystone_signing_certificate: '{{KEYSTONE_SIGNING_CERTIFICATE}}' -keystone::database_connection: mysql+pymysql://keystone:{{UNDERCLOUD_ADMIN_TOKEN}}@{{LOCAL_IP_WRAPPED}}/keystone -keystone::cron::token_flush::destination: '/dev/null' -keystone::roles::admin::password: {{UNDERCLOUD_ADMIN_PASSWORD}} -keystone::roles::admin::email: 'root@localhost' -keystone::roles::admin::admin_tenant: 'admin' -keystone::roles::admin::service_tenant: 'service' -keystone::token_expiration: 14400 -keystone::endpoint::public_url: {{UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC}} -keystone::endpoint::internal_url: {{UNDERCLOUD_ENDPOINT_KEYSTONE_INTERNAL}} -keystone::endpoint::admin_url: "%{hiera('keystone_identity_uri')}" -keystone::endpoint::region: "%{hiera('keystone_region')}" -keystone::endpoint::version: '' -keystone::wsgi::apache::ssl: false -keystone::wsgi::apache::bind_host: {{LOCAL_IP}} -keystone::notification_driver: messaging -keystone::notification_topics: notifications -keystone::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -keystone::enable_credential_setup: true -keystone::fernet_max_active_keys: 2 - -# MySQL -admin_password: {{UNDERCLOUD_ADMIN_PASSWORD}} -enable_galera: true -mysql_max_connections: '4096' -tripleo::profile::base::database::mysql::step: 2 -tripleo::profile::base::database::mysql::manage_resources: true -tripleo::profile::base::database::mysql::remove_default_accounts: true -tripleo::profile::base::database::mysql::mysql_server_options: - 'mysqld': - bind-address: "%{hiera('controller_host')}" - innodb_file_per_table: 'ON' - connect_timeout: 60 -mysql::server::restart: true -mysql::server::root_password: {{UNDERCLOUD_DB_PASSWORD}} - -# Neutron -neutron::debug: "%{hiera('debug')}" -neutron::bind_host: {{LOCAL_IP}} -neutron::core_plugin: ml2 -neutron::service_plugins: ['router', 'segments'] -neutron::dhcp_agents_per_network: 2 -neutron::dns_domain: {{OVERCLOUD_DOMAIN_NAME}} -neutron::server::api_workers: "%{::os_workers}" -neutron::server::rpc_workers: "%{::os_workers}" -neutron::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -neutron::keystone::authtoken::project_name: "%{hiera('neutron::keystone::auth::tenant')}" -neutron::server::notifications::project_name: "%{hiera('neutron::keystone::auth::tenant')}" -neutron::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -neutron::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -neutron::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -neutron::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -neutron::server::database_connection: mysql+pymysql://neutron:{{UNDERCLOUD_NEUTRON_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/neutron -neutron::server::sync_db: true -neutron::agents::ml2::ovs::local_ip: {{LOCAL_IP}} -neutron::plugins::ml2::mechanism_drivers: ['openvswitch', 'baremetal'] -neutron_bridge_mappings: 
ctlplane:br-ctlplane -neutron_public_interface: {{LOCAL_INTERFACE}} -neutron_physical_bridge: br-ctlplane -neutron::global_physnet_mtu: {{LOCAL_MTU}} -neutron::keystone::authtoken::password: {{UNDERCLOUD_NEUTRON_PASSWORD}} -neutron::agents::metadata::auth_password: {{UNDERCLOUD_NEUTRON_PASSWORD}} -neutron::agents::metadata::metadata_workers: "%{::os_workers}" -neutron::quota::quota_port: -1 -neutron::server::notifications::auth_url: "%{hiera('keystone_auth_uri')}" -neutron::server::notifications::tenant_name: service -neutron::server::notifications::password: {{UNDERCLOUD_NOVA_PASSWORD}} -neutron::keystone::auth::tenant: 'service' -neutron::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_NEUTRON_PUBLIC}} -neutron::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_NEUTRON_INTERNAL}} -neutron::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_NEUTRON_ADMIN}} -neutron::keystone::auth::password: {{UNDERCLOUD_NEUTRON_PASSWORD}} -neutron::keystone::auth::region: "%{hiera('keystone_region')}" -neutron::plugins::ml2::extension_drivers: 'port_security' -neutron::agents::ml2::networking_baremetal::user: 'ironic' -neutron::agents::ml2::networking_baremetal::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -neutron::agents::ml2::networking_baremetal::auth_url: {{UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC}} -neutron::agents::ml2::networking_baremetal::project_name: "%{hiera('neutron::keystone::auth::tenant')}" -neutron::agents::ml2::networking_baremetal::user_domain_name: "%{hiera('keystone_default_domain')}" -neutron::agents::ml2::networking_baremetal::project_domain_name: "%{hiera('keystone_default_domain')}" -neutron::agents::ml2::networking_baremetal::region_name: "%{hiera('keystone_region')}" - -# Ceilometer -ceilometer::debug: "%{hiera('debug')}" -ceilometer::metering_secret: {{UNDERCLOUD_CEILOMETER_METERING_SECRET}} -ceilometer::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -ceilometer::keystone::authtoken::password: {{UNDERCLOUD_CEILOMETER_PASSWORD}} -ceilometer::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -ceilometer::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -ceilometer::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -ceilometer::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -ceilometer::db::database_connection: mysql+pymysql://ceilometer:{{UNDERCLOUD_CEILOMETER_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/ceilometer -ceilometer::agent::auth::auth_password: {{UNDERCLOUD_CEILOMETER_PASSWORD}} -ceilometer_compute_agent: '' -ceilometer::snmpd_readonly_username: {{UNDERCLOUD_CEILOMETER_SNMPD_USER}} -ceilometer::snmpd_readonly_user_password: {{UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD}} -ceilometer::keystone::auth::tenant: 'service' -ceilometer::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_CEILOMETER_PUBLIC}} -ceilometer::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_CEILOMETER_INTERNAL}} -ceilometer::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_CEILOMETER_ADMIN}} -ceilometer::keystone::auth::password: {{UNDERCLOUD_CEILOMETER_PASSWORD}} -ceilometer::keystone::auth::region: "%{hiera('keystone_region')}" -ceilometer::dispatcher::gnocchi::url: {{UNDERCLOUD_ENDPOINT_GNOCCHI_INTERNAL}} -ceilometer::dispatcher::gnocchi::filter_project: 'service' -ceilometer::dispatcher::gnocchi::archive_policy: 'low' -ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' - -# events dispatcher config 
-ceilometer::agent::notification::event_pipeline_publishers: ['gnocchi://', 'panko://'] -ceilometer::agent::notification::manage_event_pipeline: true - -# Aodh -aodh::debug: "%{hiera('debug')}" -aodh::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -aodh::api::host: {{LOCAL_IP}} -aodh::keystone::authtoken::password: {{UNDERCLOUD_AODH_PASSWORD}} -aodh::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -aodh::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -ceilometer::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -ceilometer::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -aodh::api::service_name: 'httpd' -aodh::wsgi::apache::ssl: false -aodh::wsgi::apache::bind_host: {{LOCAL_IP}} -aodh::db::database_connection: mysql+pymysql://aodh:{{UNDERCLOUD_AODH_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/aodh -aodh::auth::auth_password: {{UNDERCLOUD_AODH_PASSWORD}} -aodh::keystone::auth::tenant: 'service' -aodh::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_AODH_PUBLIC}} -aodh::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_AODH_INTERNAL}} -aodh::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_AODH_ADMIN}} -aodh::keystone::auth::password: {{UNDERCLOUD_AODH_PASSWORD}} -aodh::keystone::auth::region: "%{hiera('keystone_region')}" - - -# Gnocchi -gnocchi::debug: "%{hiera('debug')}" -gnocchi_backend: 'file' -gnocchi::wsgi::apache::ssl: false -gnocchi::wsgi::apache::bind_host: {{LOCAL_IP}} -gnocchi::api::service_name: 'httpd' -gnocchi::api::host: {{LOCAL_IP}} -gnocchi::keystone::authtoken::password: {{UNDERCLOUD_GNOCCHI_PASSWORD}} -gnocchi::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -gnocchi::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -gnocchi::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -gnocchi::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -gnocchi::keystone::auth::tenant: 'service' -gnocchi::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_GNOCCHI_PUBLIC}} -gnocchi::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_GNOCCHI_INTERNAL}} -gnocchi::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_GNOCCHI_ADMIN}} -gnocchi::keystone::auth::password: {{UNDERCLOUD_GNOCCHI_PASSWORD}} -gnocchi::keystone::auth::region: "%{hiera('keystone_region')}" -gnocchi::db::mysql::password: {{UNDERCLOUD_GNOCCHI_PASSWORD}} -gnocchi::db::database_connection: mysql+pymysql://gnocchi:{{UNDERCLOUD_GNOCCHI_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/gnocchi -gnocchi::storage::swift::swift_user: 'service:gnocchi' -gnocchi::storage::swift::swift_auth_version: 2 -gnocchi::storage::swift::swift_authurl: "%{hiera('keystone_auth_uri')}" -gnocchi::storage::swift::swift_key: {{UNDERCLOUD_GNOCCHI_PASSWORD}} -#Gnocchi statsd -gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26' -gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3' -gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616' -gnocchi::statsd::flush_delay: 10 -gnocchi::statsd::archive_policy_name: 'low' -gnocchi_healthcheck_url: {{UNDERCLOUD_ENDPOINT_GNOCCHI_PUBLIC}}/healthcheck - -# Panko -panko::logging::debug: "%{hiera('debug')}" -panko::wsgi::apache::ssl: false -panko::wsgi::apache::bind_host: {{LOCAL_IP}} -panko::api::service_name: 'httpd' -panko::api::host: {{LOCAL_IP}} -panko::db::mysql::password: {{UNDERCLOUD_PANKO_PASSWORD}} 
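Each service in this file is wired to MariaDB through a SQLAlchemy DSN of the same shape, for example the panko connection string directly below, and the deleted manifest earlier in this diff splits such strings with match(..., $re_dsn) to feed user, password, host and dbname into the *::db::mysql classes. A loose shell sketch of that split follows; the manifest's actual $re_dsn regex is defined in a portion not shown in this excerpt, so the pattern here is an assumption:

    # Hypothetical re-implementation of the manifest's DSN split; the regex is
    # an approximation, not the manifest's actual $re_dsn.
    dsn='mysql+pymysql://panko:secret@192.0.2.1/panko'
    re='^mysql[+]pymysql://([^:]+):([^@]+)@([^/]+)/(.+)$'
    if [[ $dsn =~ $re ]]; then
        echo "user=${BASH_REMATCH[1]} password=${BASH_REMATCH[2]}"
        echo "host=${BASH_REMATCH[3]} dbname=${BASH_REMATCH[4]}"
    fi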
-panko::db::database_connection: mysql+pymysql://panko:{{UNDERCLOUD_PANKO_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/panko -panko::keystone::authtoken::password: {{UNDERCLOUD_PANKO_PASSWORD}} -panko::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -panko::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -panko::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -panko::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -panko::keystone::auth::tenant: 'service' -panko::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_PANKO_PUBLIC}} -panko::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_PANKO_INTERNAL}} -panko::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_PANKO_ADMIN}} -panko::keystone::auth::password: {{UNDERCLOUD_PANKO_PASSWORD}} -panko::keystone::auth::region: "%{hiera('keystone_region')}" -panko::keystone::authtoken::project_name: 'service' - -# Nova -nova::debug: "%{hiera('debug')}" -nova::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -nova::notification_driver: messaging -nova::rpc_response_timeout: '600' -nova::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -nova::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -nova::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -nova::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -nova::api::service_name: 'httpd' -nova::api::api_bind_address: {{LOCAL_IP}} -nova::api::enabled: true -nova::api::metadata_listen: {{LOCAL_IP}} -nova::keystone::authtoken::password: {{UNDERCLOUD_NOVA_PASSWORD}} -nova::api::enabled_apis: - - metadata -nova::api::sync_db_api: true -nova::api::osapi_compute_workers: "%{::os_workers}" -nova::api::metadata_workers: "%{::os_workers}" -nova::wsgi::apache_api::ssl: false -nova::wsgi::apache_api::bind_host: {{LOCAL_IP}} -nova::wsgi::apache_placement::ssl: false -nova::wsgi::apache_placement::bind_host: {{LOCAL_IP}} -nova::wsgi::apache_placement::api_port: '8778' -nova::placement::auth_url: "%{hiera('keystone_identity_uri')}" -nova::placement::password: {{UNDERCLOUD_NOVA_PASSWORD}} -nova::placement::project_name: 'service' -nova::placement::os_region_name: "%{hiera('keystone_region')}" -nova::conductor::enabled: true -nova::conductor::workers: "%{::os_workers}" -nova::database_connection: mysql+pymysql://nova:{{UNDERCLOUD_NOVA_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/nova -nova::api_database_connection: mysql+pymysql://nova_api:{{UNDERCLOUD_NOVA_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/nova_api -nova::placement_database_connection: mysql+pymysql://nova_placement:{{UNDERCLOUD_NOVA_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/nova_placement -nova::notify_on_state_change: 'vm_and_task_state' -nova::scheduler::enabled: true -nova::network::neutron::dhcp_domain: '' -nova::compute::force_config_drive: true -nova::compute::reserved_host_memory: '0' -nova::compute::vnc_enabled: false -nova::compute::instance_usage_audit: true -nova::compute::instance_usage_audit_period: 'hour' -nova::compute::consecutive_build_service_disable_threshold: 0 -nova::cron::archive_deleted_rows::destination: '/dev/null' -nova::compute::sync_power_state_interval: -1 - -nova::ironic::common::username: 'ironic' -nova::ironic::common::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -nova::ironic::common::project_name: 'service' -nova::ironic::common::api_endpoint: "{{UNDERCLOUD_ENDPOINT_IRONIC_PUBLIC}}/v1" 
-nova::ironic::common::auth_url: "%{hiera('keystone_identity_uri')}" - -nova::network::neutron::neutron_auth_url: "%{hiera('keystone_auth_uri')}" -nova::network::neutron::neutron_url: {{UNDERCLOUD_ENDPOINT_NEUTRON_PUBLIC}} -nova::network::neutron::neutron_password: "%{hiera('neutron::keystone::authtoken::password')}" -nova::network::neutron::neutron_project_name: "%{hiera('neutron::keystone::auth::tenant')}" -nova::network::neutron::neutron_region_name: '' - -nova::ram_allocation_ratio: '1.0' -nova::scheduler::filter::scheduler_max_attempts: {{SCHEDULER_MAX_ATTEMPTS}} -nova::scheduler::filter::scheduler_available_filters: ['tripleo_common.filters.list.tripleo_filters'] -nova::scheduler::filter::scheduler_default_filters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter'] - -nova::keystone::auth::tenant: 'service' -nova::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_NOVA_PUBLIC}} -nova::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_NOVA_INTERNAL}} -nova::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_NOVA_ADMIN}} -nova::keystone::auth::password: {{UNDERCLOUD_NOVA_PASSWORD}} -nova::keystone::auth::region: "%{hiera('keystone_region')}" -nova::keystone::auth::configure_ec2_endpoint: false - -nova::keystone::auth_placement::tenant: 'service' -nova::keystone::auth_placement::public_url: {{UNDERCLOUD_ENDPOINT_PLACEMENT_PUBLIC}} -nova::keystone::auth_placement::internal_url: {{UNDERCLOUD_ENDPOINT_PLACEMENT_INTERNAL}} -nova::keystone::auth_placement::admin_url: {{UNDERCLOUD_ENDPOINT_PLACEMENT_ADMIN}} -nova::keystone::auth_placement::password: {{UNDERCLOUD_NOVA_PASSWORD}} -nova::keystone::auth_placement::region: "%{hiera('keystone_region')}" - -nova::glance_api_servers: {{UNDERCLOUD_ENDPOINT_GLANCE_INTERNAL}} - -# NOTE(aschultz): raise upper limit on nova DB syncs for undercloud only. -# There is no way this should take 15 minutes and if it does we now have way -# different problems. But rather than block undercloud installs let's increase -# the timeout for these actions. See LP#1661396 for more details. 
-nova::db::sync::db_sync_timeout: 900 -nova::db::sync_api::db_sync_timeout: 900 - -# Ironic -ironic::debug: "%{hiera('debug')}" -ironic::my_ip: {{LOCAL_IP}} -ironic::db_online_data_migrations: true -# TODO(dtantsur): remove when support for classic drivers is removed -ironic::db::online_data_migrations::migration_params: "--option migrate_to_hardware_types.reset_unsupported_interfaces=true" -ironic::rpc_response_timeout: 600 -ironic::api::authtoken::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::api::authtoken::auth_uri: "%{hiera('keystone_auth_uri')}" -ironic::api::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -ironic::api::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -ironic::api::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -ironic::api::host_ip: {{LOCAL_IP}} -ironic::api::service_name: 'httpd' -ironic::api::workers: "%{::os_workers}" -ironic::wsgi::apache::ssl: false -ironic::wsgi::apache::bind_host: {{LOCAL_IP}} -ironic::pxe::tftp_bind_host: {{LOCAL_IP}} -ironic::database_connection: mysql+pymysql://ironic:{{UNDERCLOUD_IRONIC_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/ironic -ironic::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -ironic::drivers::inspector::enabled: true -ironic::drivers::inspector::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::drivers::inspector::auth_url: "%{hiera('keystone_identity_uri')}" -ironic::drivers::inspector::user_domain_name: "%{hiera('keystone_default_domain')}" -ironic::drivers::inspector::project_domain_name: "%{hiera('keystone_default_domain')}" -ironic::glance::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::glance::auth_url: "%{hiera('keystone_identity_uri')}" -ironic::neutron::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::neutron::auth_url: "%{hiera('keystone_identity_uri')}" -ironic::service_catalog::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::service_catalog::auth_url: "%{hiera('keystone_identity_uri')}" -ironic::swift::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::swift::auth_url: "%{hiera('keystone_identity_uri')}" -# Ironic conductor forces deployments to use http -# https://bugs.launchpad.net/tripleo/+bug/1613088 -ironic::conductor::api_url: {{UNDERCLOUD_ENDPOINT_IRONIC_INTERNAL}} -ironic::conductor::force_power_state_during_sync: false -ironic::conductor::automated_clean: {{CLEAN_NODES}} -ironic::conductor::cleaning_disk_erase: 'metadata' -ironic::conductor::cleaning_network: 'ctlplane' -ironic::conductor::provisioning_network: 'ctlplane' -ironic::conductor::default_boot_option: 'local' -ironic::conductor::enabled_hardware_types: {{ENABLED_HARDWARE_TYPES}} -ironic::drivers::interfaces::default_inspect_interface: inspector -ironic::drivers::interfaces::enabled_boot_interfaces: {{ENABLED_BOOT_INTERFACES}} -ironic::drivers::interfaces::enabled_console_interfaces: ['no-console', 'ipmitool-socat'] -ironic::drivers::interfaces::enabled_deploy_interfaces: ['iscsi', 'direct', 'ansible'] -ironic::drivers::interfaces::enabled_inspect_interfaces: ['no-inspect', 'inspector'] -ironic::drivers::interfaces::enabled_management_interfaces: {{ENABLED_MANAGEMENT_INTERFACES}} -ironic::drivers::interfaces::enabled_power_interfaces: {{ENABLED_POWER_INTERFACES}} -ironic::drivers::interfaces::enabled_raid_interfaces: {{ENABLED_RAID_INTERFACES}} -ironic::drivers::interfaces::enabled_vendor_interfaces: {{ENABLED_VENDOR_INTERFACES}} - -# Make sure new nodes default to 'baremetal' resource class -ironic::default_resource_class: 
'baremetal' - -ironic::keystone::auth::tenant: 'service' -ironic::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_IRONIC_PUBLIC}} -ironic::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_IRONIC_INTERNAL}} -ironic::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_IRONIC_ADMIN}} -ironic::keystone::auth::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::keystone::auth::region: "%{hiera('keystone_region')}" - -ironic::keystone::auth_inspector::tenant: 'service' -ironic::keystone::auth_inspector::public_url: {{UNDERCLOUD_ENDPOINT_IRONIC_INSPECTOR_PUBLIC}} -ironic::keystone::auth_inspector::internal_url: {{UNDERCLOUD_ENDPOINT_IRONIC_INSPECTOR_INTERNAL}} -ironic::keystone::auth_inspector::admin_url: {{UNDERCLOUD_ENDPOINT_IRONIC_INSPECTOR_ADMIN}} -ironic::keystone::auth_inspector::password: {{UNDERCLOUD_IRONIC_PASSWORD}} -ironic::keystone::auth_inspector::region: "%{hiera('keystone_region')}" - -# Ironic Inspector -ironic::inspector::listen_address: {{LOCAL_IP}} -ironic::inspector::debug: "%{hiera('debug')}" -{{#IPXE_ENABLED}} -ironic::inspector::pxe_transfer_protocol: 'http' -{{/IPXE_ENABLED}} -ironic::inspector::authtoken::auth_uri: "%{hiera('keystone_auth_uri')}" -ironic::inspector::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -ironic::inspector::authtoken::username: 'ironic' -ironic::inspector::authtoken::password: "%{hiera('ironic::api::authtoken::password')}" -ironic::inspector::authtoken::project_name: 'service' -ironic::inspector::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -ironic::inspector::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -ironic::inspector::db::database_connection: mysql+pymysql://ironic-inspector:{{UNDERCLOUD_IRONIC_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/ironic-inspector -ironic::inspector::keep_ports: 'added' -ironic::inspector::ironic_username: 'ironic' -ironic::inspector::ironic_password: "%{hiera('ironic::api::authtoken::password')}" -ironic::inspector::ironic_tenant_name: 'service' -ironic::inspector::ironic_project_domain_name: 'Default' -ironic::inspector::ironic_user_domain_name: 'Default' -ironic::inspector::ironic_auth_url: "%{hiera('keystone_auth_uri')}" -ironic::inspector::ironic_max_retries: 6 -ironic::inspector::ironic_retry_interval: 10 -ironic::inspector::store_data: 'swift' -ironic::inspector::swift_username: 'ironic' -ironic::inspector::swift_password: "%{hiera('ironic::api::authtoken::password')}" -ironic::inspector::swift_tenant_name: 'service' -ironic::inspector::swift_project_domain_name: 'Default' -ironic::inspector::swift_user_domain_name: 'Default' -ironic::inspector::swift_auth_url: "%{hiera('keystone_auth_uri')}" -ironic::inspector::dnsmasq_local_ip: {{LOCAL_IP}} -ironic::inspector::dnsmasq_interface: {{INSPECTION_INTERFACE}} -ironic::inspector::dnsmasq_ip_subnets: {{{INSPECTION_SUBNETS}}} -ironic::inspector::pxe_filter::driver: dnsmasq -ironic::inspector::pxe_filter::dnsmasq::dnsmasq_start_command: 'systemctl start openstack-ironic-inspector-dnsmasq.service' -ironic::inspector::pxe_filter::dnsmasq::dnsmasq_stop_command: 'systemctl stop openstack-ironic-inspector-dnsmasq.service' -ironic::inspector::dnsmasq_dhcp_hostsdir: '/var/lib/ironic-inspector/dhcp-hostsdir' -ironic::inspector::ramdisk_collectors: {{INSPECTION_COLLECTORS}} -ironic::inspector::additional_processing_hooks: 'extra_hardware,lldp_basic,local_link_connection' -ironic::inspector::ramdisk_kernel_args: {{INSPECTION_KERNEL_ARGS}} -ironic::inspector::ipxe_timeout: 60 -ironic::inspector::node_not_found_hook: 
{{INSPECTION_NODE_NOT_FOUND_HOOK}} -ironic::inspector::discovery_default_driver: {{DISCOVERY_DEFAULT_DRIVER}} -ironic::inspector::detect_boot_mode: true - -# Ironic PXE driver -ironic::drivers::pxe::ipxe_timeout: 60 - -# Ironic deploy utils -ironic_ipxe_port: 8088 -ironic::conductor::http_url: "http://{{LOCAL_IP_WRAPPED}}:%{hiera('ironic_ipxe_port')}" -ironic::conductor::http_boot: '/httpboot' -ironic::inspector::http_port: "%{hiera('ironic_ipxe_port')}" - -# Ironic pxe -ironic::drivers::pxe::ipxe_enabled: {{IPXE_ENABLED}} -# NOTE(dtantsur): UEFI only works with iPXE currently for us -ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template' -ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi' - -# Ironic agent -ironic::drivers::agent::deploy_logs_collect: 'always' -ironic::drivers::agent::deploy_logs_storage_backend: 'local' -ironic::drivers::agent::deploy_logs_local_path: '/var/log/ironic/deploy/' - -# Ironic power and management drivers tuning -ironic::drivers::ilo::default_boot_mode: 'bios' - -# Customisations for ppc64le -{{#ENABLE_ARCHITECTURE_PPC64LE}} -ironic::pxe::enable_ppc64le: true -ironic::inspector::enable_ppc64le: true -ironic::conductor::power_state_change_timeout: 60 -ironic::drivers::ipmi::command_retry_timeout: 120 -ironic::drivers::ipmi::min_command_interval: 15 -{{/ENABLE_ARCHITECTURE_PPC64LE}} - -# Rabbit -rabbit_cookie: {{UNDERCLOUD_RABBIT_COOKIE}} -rabbitmq::delete_guest_user: false -rabbitmq::node_ip_address: {{LOCAL_IP}} -rabbitmq::management_ip_address: {{LOCAL_IP}} -rabbitmq::package_source: undef -rabbitmq::port: 5672 -rabbitmq::repos_ensure: false -rabbitmq::wipe_db_on_cookie_change: true -rabbitmq::default_user: {{UNDERCLOUD_RABBIT_USERNAME}} -rabbitmq::default_pass: {{UNDERCLOUD_RABBIT_PASSWORD}} - -# Horizon -horizon::django_debug: "%{hiera('debug')}" -horizon_secret_key: {{UNDERCLOUD_HORIZON_SECRET_KEY}} -horizon::allowed_hosts: - - "%{::fqdn}" - - "{{LOCAL_IP}}" -horizon::wsgi::apache::priority: 10 -horizon::openstack_endpoint_type: internalURL - -# Mistral -mistral::debug: "%{hiera('debug')}" -mistral::api::bind_host: {{LOCAL_IP}} -mistral::api::api_workers: "%{::os_workers}" -mistral::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -mistral::database_connection: mysql+pymysql://mistral:{{UNDERCLOUD_MISTRAL_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/mistral -mistral::rpc_backend: rabbit -mistral::rpc_response_timeout: 120 -mistral::cron_trigger::execution_interval: 600 -mistral::keystone::authtoken::password: {{UNDERCLOUD_MISTRAL_PASSWORD}} -mistral::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -mistral::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -mistral::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -mistral::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" - -mistral::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_MISTRAL_PUBLIC}} -mistral::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_MISTRAL_INTERNAL}} -mistral::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_MISTRAL_ADMIN}} -mistral::keystone::auth::region: "%{hiera('keystone_region')}" -mistral::keystone::auth::password: {{UNDERCLOUD_MISTRAL_PASSWORD}} -mistral::keystone::auth::tenant: 'service' -mistral::engine::older_than: 2880 -mistral::engine::evaluation_interval: 120 -mistral::engine::execution_field_size_limit_kb: 16384 - -# Zaqar 
-zaqar::keystone::authtoken::project_name: 'service' -zaqar::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -zaqar::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -zaqar::keystone::authtoken::password: {{UNDERCLOUD_ZAQAR_PASSWORD}} -zaqar::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -zaqar::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -zaqar::keystone::auth::tenant: 'service' -zaqar::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_ZAQAR_PUBLIC}} -zaqar::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_ZAQAR_INTERNAL}} -zaqar::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_ZAQAR_ADMIN}} -zaqar::keystone::auth::region: "%{hiera('keystone_region')}" -zaqar::keystone::auth::password: {{UNDERCLOUD_ZAQAR_PASSWORD}} -zaqar::keystone::auth::roles: - - admin - - ResellerAdmin -zaqar::keystone::auth_websocket::tenant: 'service' -zaqar::keystone::auth_websocket::public_url: {{UNDERCLOUD_ENDPOINT_ZAQAR_WEBSOCKET_PUBLIC}} -zaqar::keystone::auth_websocket::internal_url: {{UNDERCLOUD_ENDPOINT_ZAQAR_WEBSOCKET_INTERNAL}} -zaqar::keystone::auth_websocket::admin_url: {{UNDERCLOUD_ENDPOINT_ZAQAR_WEBSOCKET_ADMIN}} -zaqar::keystone::auth_websocket::region: "%{hiera('keystone_region')}" -zaqar::keystone::auth_websocket::password: {{UNDERCLOUD_ZAQAR_PASSWORD}} -zaqar::server::service_name: 'httpd' -zaqar::unreliable: true -zaqar::transport::websocket::bind: {{LOCAL_IP}} -zaqar::transport::websocket::notification_bind: {{LOCAL_IP}} -zaqar::wsgi::apache::bind_host: {{LOCAL_IP}} -zaqar::wsgi::apache::ssl: false -zaqar::message_store: swift -zaqar::management_store: sqlalchemy -zaqar::management::sqlalchemy::uri: mysql+pymysql://zaqar:{{UNDERCLOUD_ZAQAR_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/zaqar -zaqar::messaging::swift::uri: swift://zaqar:{{UNDERCLOUD_ZAQAR_PASSWORD}}@/service -zaqar::messaging::swift::auth_url: "%{hiera('keystone_auth_uri')}" -zaqar::message_pipeline: 'zaqar.notification.notifier' -zaqar::max_messages_post_size: 1048576 - -# Cinder -cinder::debug: "%{hiera('debug')}" -cinder_backend_name: 'undercloud_iscsi' -cinder_enable_test_volume: false -cinder_iscsi_address: {{LOCAL_IP}} -cinder::api::enable_proxy_headers_parsing: true -cinder::api::service_name: 'httpd' -cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL' -cinder::backends::enabled_backends: ["%{hiera('cinder_backend_name')}"] -cinder::cron::db_purge::destination: "/dev/null" -cinder::database_connection: mysql+pymysql://cinder:{{UNDERCLOUD_CINDER_PASSWORD}}@{{LOCAL_IP_WRAPPED}}/cinder -cinder::db::database_db_max_retries: -1 -cinder::db::database_max_retries: -1 -cinder::debug: "%{hiera('debug')}" -cinder::glance::glance_api_servers: {{UNDERCLOUD_ENDPOINT_GLANCE_INTERNAL}} -cinder::keystone::auth::tenant: 'service' -cinder::keystone::auth::public_url: {{UNDERCLOUD_ENDPOINT_CINDER_PUBLIC}} -cinder::keystone::auth::internal_url: {{UNDERCLOUD_ENDPOINT_CINDER_INTERNAL}} -cinder::keystone::auth::admin_url: {{UNDERCLOUD_ENDPOINT_CINDER_ADMIN}} -cinder::keystone::auth::public_url_v2: {{UNDERCLOUD_ENDPOINT_CINDER_V2_PUBLIC}} -cinder::keystone::auth::internal_url_v2: {{UNDERCLOUD_ENDPOINT_CINDER_V2_INTERNAL}} -cinder::keystone::auth::admin_url_v2: {{UNDERCLOUD_ENDPOINT_CINDER_V2_ADMIN}} -cinder::keystone::auth::public_url_v3: {{UNDERCLOUD_ENDPOINT_CINDER_V3_PUBLIC}} -cinder::keystone::auth::internal_url_v3: {{UNDERCLOUD_ENDPOINT_CINDER_V3_INTERNAL}} -cinder::keystone::auth::admin_url_v3: 
{{UNDERCLOUD_ENDPOINT_CINDER_V3_ADMIN}} -cinder::keystone::auth::region: "%{hiera('keystone_region')}" -cinder::keystone::auth::password: {{UNDERCLOUD_CINDER_PASSWORD}} -cinder::keystone::authtoken::project_name: 'service' -cinder::keystone::authtoken::www_authenticate_uri: "%{hiera('keystone_auth_uri')}" -cinder::keystone::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -cinder::keystone::authtoken::password: {{UNDERCLOUD_CINDER_PASSWORD}} -cinder::keystone::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -cinder::keystone::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -cinder::default_transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler -cinder::setup_test_volume::size: '10280M' -cinder::wsgi::apache::bind_host: {{LOCAL_IP}} -cinder::wsgi::apache::ssl: false -cinder::wsgi::apache::workers: "%{::os_workers}" - -# HAproxy -tripleo::profile::base::haproxy::step: 1 -tripleo::haproxy::haproxy_stats_password: {{UNDERCLOUD_HAPROXY_STATS_PASSWORD}} -tripleo::haproxy::controller_virtual_ip: "%{hiera('controller_admin_host')}" -tripleo::haproxy::controller_hosts: "%{hiera('controller_host')}" -tripleo::haproxy::public_virtual_ip: "%{hiera('controller_public_host')}" -tripleo::haproxy::public_virtual_interface: 'br-ctlplane' -tripleo::haproxy::keystone_admin: true -tripleo::haproxy::keystone_public: true -tripleo::haproxy::neutron: true -tripleo::haproxy::glance_api: true -tripleo::haproxy::glance_registry: true -tripleo::haproxy::nova_osapi: true -tripleo::haproxy::nova_placement: true -tripleo::haproxy::nova_metadata: true -tripleo::haproxy::swift_proxy_server: true -tripleo::haproxy::heat_api: true -tripleo::haproxy::ceilometer: "%{hiera('enable_telemetry')}" -tripleo::haproxy::aodh: "%{hiera('enable_telemetry')}" -tripleo::haproxy::gnocchi: "%{hiera('enable_telemetry')}" -tripleo::haproxy::panko: "%{hiera('enable_telemetry')}" -tripleo::haproxy::ironic: true -tripleo::haproxy::ironic_inspector: true -tripleo::haproxy::rabbitmq: true -tripleo::haproxy::mistral: true -tripleo::haproxy::zaqar_api: true -tripleo::haproxy::zaqar_ws: true -tripleo::haproxy::docker_registry: true - -# Docker -tripleo::profile::base::docker::step: 1 -# Undercloud should not have --iptables=false by default hence this override (LP#1709325) -tripleo::profile::base::docker::docker_options: '--log-driver=journald --signature-verification=false' -{{#DOCKER_REGISTRY_MIRROR}} -tripleo::profile::base::docker::registry_mirror: {{DOCKER_REGISTRY_MIRROR}} -{{/DOCKER_REGISTRY_MIRROR}} -tripleo::profile::base::docker::debug: "%{hiera('debug')}" -tripleo::profile::base::docker::insecure_registries: {{DOCKER_INSECURE_REGISTRIES}} - -# Keepalived -tripleo::keepalived::controller_virtual_ip: "%{hiera('controller_admin_host')}" -tripleo::keepalived::control_virtual_interface: 'br-ctlplane' -tripleo::keepalived::public_virtual_ip: "%{hiera('controller_public_host')}" -tripleo::keepalived::public_virtual_interface: 'br-ctlplane' -tripleo::keepalived::virtual_router_id_base: 40 - -# UI -keystone::cors::allowed_origin: '*' -nova::cors::allowed_origin: '*' -nova::cors::max_age: 3600 -nova::cors::allow_methods: 'GET,POST,PUT,DELETE,OPTIONS,PATCH' -nova::cors::allow_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token' -nova::cors::expose_headers: 
'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma' -ironic::cors::allowed_origin: '*' -ironic::cors::max_age: 3600 -ironic::cors::allow_methods: 'GET,POST,PUT,DELETE,OPTIONS,PATCH' -ironic::cors::allow_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token' -ironic::cors::expose_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma' -ironic::inspector::cors::allowed_origin: '*' -ironic::inspector::cors::max_age: 3600 -ironic::inspector::cors::allow_methods: 'GET,POST,PUT,DELETE,OPTIONS,PATCH' -ironic::inspector::cors::allow_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token' -ironic::inspector::cors::expose_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma' -heat::cors::allowed_origin: '*' -heat::cors::max_age: 3600 -heat::cors::allow_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token' -heat::cors::expose_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma' -mistral::cors::allowed_origin: '*' -mistral::cors::allow_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token' -mistral::cors::expose_headers: 'Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma' -swift::proxy::cors_allow_origin: '*' -tripleo::ui::endpoint_proxy_zaqar: {{UNDERCLOUD_ENDPOINT_ZAQAR_UI_PROXY_INTERNAL}} -tripleo::ui::endpoint_proxy_keystone: {{UNDERCLOUD_ENDPOINT_KEYSTONE_INTERNAL}} -tripleo::ui::endpoint_proxy_heat: {{UNDERCLOUD_ENDPOINT_HEAT_UI_PROXY_INTERNAL}} -tripleo::ui::endpoint_proxy_ironic: {{UNDERCLOUD_ENDPOINT_IRONIC_INTERNAL}} -tripleo::ui::endpoint_proxy_ironic_inspector: {{UNDERCLOUD_ENDPOINT_IRONIC_INSPECTOR_INTERNAL}} -tripleo::ui::endpoint_proxy_mistral: {{UNDERCLOUD_ENDPOINT_MISTRAL_UI_PROXY_INTERNAL}} -tripleo::ui::endpoint_proxy_nova: {{UNDERCLOUD_ENDPOINT_NOVA_UI_PROXY_INTERNAL}} -tripleo::ui::endpoint_proxy_swift: {{UNDERCLOUD_ENDPOINT_SWIFT_UI_PROXY_INTERNAL}} -tripleo::ui::endpoint_config_zaqar: {{UNDERCLOUD_ENDPOINT_ZAQAR_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_keystone: {{UNDERCLOUD_ENDPOINT_KEYSTONE_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_heat: {{UNDERCLOUD_ENDPOINT_HEAT_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_ironic: {{UNDERCLOUD_ENDPOINT_IRONIC_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_ironic_inspector: {{UNDERCLOUD_ENDPOINT_IRONIC_INSPECTOR_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_mistral: {{UNDERCLOUD_ENDPOINT_MISTRAL_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_nova: {{UNDERCLOUD_ENDPOINT_NOVA_UI_CONFIG_PUBLIC}} -tripleo::ui::endpoint_config_swift: {{UNDERCLOUD_ENDPOINT_SWIFT_UI_CONFIG_PUBLIC}} - -# service tenant -ceilometer::keystone::authtoken::project_name: 'service' -aodh::keystone::authtoken::project_name: 'service' -gnocchi::keystone::authtoken::project_name: 'service' -cinder::keystone::authtoken::project_name: 'service' -heat::keystone::authtoken::project_name: 'service' -glance::api::authtoken::project_name: 'service' -glance::registry::authtoken::project_name: 'service' -ironic::api::authtoken::project_name: 'service' -ironic::drivers::inspector::project_name: 'service' -ironic::glance::project_name: 'service' -ironic::neutron::project_name: 'service' -ironic::service_catalog::project_name: 'service' -ironic::swift::project_name: 'service' -nova::keystone::authtoken::project_name: 'service' 
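The permissive CORS settings above exist so the TripleO UI (served on port 3000, per the firewall rules further down) can call the service APIs directly from the browser. A hedged spot-check, not part of the deleted tree and using placeholder addresses that would need to be replaced by the real undercloud endpoints, is to issue a preflight request and look for the configured headers:

    # Hypothetical CORS preflight against the ironic API (port 6385 per the
    # firewall rules below); 192.0.2.1 is an illustrative placeholder address.
    curl -s -o /dev/null -D - -X OPTIONS \
        -H 'Origin: http://192.0.2.1:3000' \
        -H 'Access-Control-Request-Method: GET' \
        http://192.0.2.1:6385/v1/nodes | grep -i '^access-control-'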
-swift::proxy::authtoken::project_name: 'service' -mistral::keystone::authtoken::project_name: 'service' - -swift::proxy::workers: "%{::os_workers}" -# Options -enable_tempest: {{ENABLE_TEMPEST}} -enable_validations: {{ENABLE_VALIDATIONS}} -enable_telemetry: {{ENABLE_TELEMETRY}} -enable_ui: {{ENABLE_UI}} -enable_cinder: {{ENABLE_CINDER}} -enable_container_images_build: {{ENABLE_CONTAINER_IMAGES_BUILD}} - -# Path to install configuration files -tripleo_install_user: {{TRIPLEO_INSTALL_USER}} -tripleo_undercloud_conf_file: {{TRIPLEO_UNDERCLOUD_CONF_FILE}} -tripleo_undercloud_password_file: {{TRIPLEO_UNDERCLOUD_PASSWORD_FILE}} - -# Novajoin -{{#ENABLE_NOVAJOIN}} -novajoin_listen_port: 9090 -nova::metadata::novajoin::api::bind_address: "{{LOCAL_IP}}" -nova::metadata::novajoin::api::join_listen_port: "%{hiera('novajoin_listen_port')}" -nova::metadata::novajoin::api::keystone_auth_url: "%{hiera('keystone_auth_uri')}" -nova::metadata::novajoin::api::service_password: {{UNDERCLOUD_NOVAJOIN_PASSWORD}} -nova::metadata::novajoin::api::transport_url: "rabbit://{{UNDERCLOUD_RABBIT_USERNAME}}:{{UNDERCLOUD_RABBIT_PASSWORD}}@{{LOCAL_IP_WRAPPED}}//" -nova::metadata::novajoin::authtoken::auth_url: "%{hiera('keystone_identity_uri')}" -nova::metadata::novajoin::authtoken::auth_uri: "%{hiera('keystone_auth_uri')}" -nova::metadata::novajoin::authtoken::password: {{UNDERCLOUD_NOVAJOIN_PASSWORD}} -nova::metadata::novajoin::authtoken::project_name: 'service' -nova::metadata::novajoin::authtoken::user_domain_name: "%{hiera('keystone_default_domain')}" -nova::metadata::novajoin::authtoken::project_domain_name: "%{hiera('keystone_default_domain')}" -nova::metadata::novajoin::auth::tenant: 'service' -nova::metadata::novajoin::auth::password: {{UNDERCLOUD_NOVAJOIN_PASSWORD}} -nova::metadata::novajoin::auth::region: "%{hiera('keystone_region')}" -ipaclient::password: {{IPA_OTP}} -ipaclient::hostname: {{UNDERCLOUD_HOSTNAME}} -enable_novajoin: true -nova::api::vendordata_jsonfile_path: '/etc/novajoin/cloud-config-novajoin.json' -nova::api::vendordata_providers: ['StaticJSON', 'DynamicJSON'] -nova::api::vendordata_dynamic_targets: ["join@http://{{LOCAL_IP}}:%{hiera('novajoin_listen_port')}/v1/"] -nova::api::vendordata_dynamic_failure_fatal: true -nova::api::vendordata_dynamic_auth_auth_type: 'password' -nova::api::vendordata_dynamic_auth_auth_url: "%{hiera('keystone_auth_uri')}" -nova::api::vendordata_dynamic_auth_os_region_name: "%{hiera('keystone_region')}" -nova::api::vendordata_dynamic_auth_username: 'nova' -nova::api::vendordata_dynamic_auth_project_name: 'service' -nova::api::vendordata_dynamic_auth_project_domain_name: 'Default' -nova::api::vendordata_dynamic_auth_user_domain_name: 'Default' -nova::api::vendordata_dynamic_auth_password: {{UNDERCLOUD_NOVA_PASSWORD}} -nova::api::vendordata_dynamic_connect_timeout: 30 -nova::api::vendordata_dynamic_read_timeout: 30 -nova::notification_topics: ['notifications', 'novajoin_notifications'] -nova::notify_on_state_change: 'vm_state' -{{/ENABLE_NOVAJOIN}} - -# Firewall -tripleo::firewall::manage_firewall: true -tripleo::firewall::firewall_rules: - '105 ntp': - dport: 123 - proto: udp - '106 vrrp': - proto: vrrp - '107 haproxy stats': - dport: 1993 - '108 redis': - dport: - - 6379 - - 26379 - '110 ceph': - dport: - - 6789 - - '6800-6810' - '111 keystone': - dport: - - 5000 - - 13000 - - 35357 - - 13357 - '112 glance': - dport: - - 9292 - - 9191 - - 13292 - '113 nova': - dport: - - 6080 - - 13080 - - 8773 - - 13773 - - 8774 - - 13774 - - 8778 - - 13778 - - 8775 - - 13775 - '114 
neutron server': - dport: - - 9696 - - 13696 - '115 neutron dhcp input': - proto: 'udp' - dport: 67 - '116 neutron dhcp output': - proto: 'udp' - chain: 'OUTPUT' - dport: 68 - '118 neutron vxlan networks': - proto: 'udp' - dport: 4789 - '119 cinder': - dport: - - 8776 - - 13776 - '120 iscsi initiator': - dport: 3260 - '121 memcached': - dport: 11211 - proto: tcp - source: '127.0.0.1' - '122 swift proxy': - dport: - - 8080 - - 13808 - '123 swift storage': - dport: - - 873 - - 6000 - - 6001 - - 6002 - '125 heat': - dport: - - 8000 - - 13800 - - 8003 - - 13003 - - 8004 - - 13004 - '126 horizon': - dport: - - 80 - - 443 - '127 snmp': - dport: 161 - proto: 'udp' - '128 aodh': - dport: - - 8042 - - 13042 - '129 gnocchi-api': - dport: - - 8041 - - 13041 - '130 tftp': - dport: 69 - proto: udp - '131 novnc': - dport: 5900-5999 - proto: tcp - '132 mistral': - dport: - - 8989 - - 13989 - '133 zaqar': - dport: - - 8888 - - 13888 - '134 zaqar websockets': - dport: 9000 - '135 ironic': - dport: - - 6385 - - 13385 - '136 trove': - dport: - - 8779 - - 13779 - '137 ironic-inspector': - dport: 5050 - '138 docker registry': - dport: - - 8787 - - 13787 - '139 apache vhost': - dport: "%{hiera('ironic_ipxe_port')}" - # 140 network cidr nat rules - {{SUBNETS_CIDR_NAT_RULES}} - '142 tripleo-ui': - dport: - - 3000 - - 443 - '143 panko-api': - dport: - - 8977 - - 13977 diff --git a/elements/undercloud-install/element-provides b/elements/undercloud-install/element-provides deleted file mode 100644 index a72e04969..000000000 --- a/elements/undercloud-install/element-provides +++ /dev/null @@ -1 +0,0 @@ -operating-system diff --git a/elements/undercloud-install/os-apply-config/etc/os-net-config/config.json b/elements/undercloud-install/os-apply-config/etc/os-net-config/config.json deleted file mode 100644 index e6f6540ea..000000000 --- a/elements/undercloud-install/os-apply-config/etc/os-net-config/config.json +++ /dev/null @@ -1,3 +0,0 @@ -{{#os_net_config}} -{{.}} -{{/os_net_config}} diff --git a/elements/undercloud-install/os-apply-config/root/stackrc b/elements/undercloud-install/os-apply-config/root/stackrc deleted file mode 100644 index 4e51fa6a9..000000000 --- a/elements/undercloud-install/os-apply-config/root/stackrc +++ /dev/null @@ -1,46 +0,0 @@ -# Clear any old environment that may conflict. 
-for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done -NOVA_VERSION=1.1 -export NOVA_VERSION -OS_PASSWORD={{admin_password}} -export OS_PASSWORD -OS_AUTH_TYPE=password -export OS_AUTH_TYPE -{{#service_certificate}} -OS_AUTH_URL=https://{{public_host}}:13000/ -PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available" -export OS_AUTH_URL -export PYTHONWARNINGS -{{/service_certificate}} -{{^service_certificate}} -OS_AUTH_URL=http://{{local-ip-wrapped}}:5000/ -export OS_AUTH_URL -{{/service_certificate}} -OS_USERNAME=admin -OS_PROJECT_NAME=admin -COMPUTE_API_VERSION=1.1 -# 1.34 is the latest API version in Ironic Pike supported by ironicclient -IRONIC_API_VERSION=1.34 -OS_BAREMETAL_API_VERSION=$IRONIC_API_VERSION -OS_NO_CACHE=True -OS_CLOUDNAME=undercloud -export OS_USERNAME -export OS_PROJECT_NAME -export COMPUTE_API_VERSION -export IRONIC_API_VERSION -export OS_BAREMETAL_API_VERSION -export OS_NO_CACHE -export OS_CLOUDNAME -OS_IDENTITY_API_VERSION='3' -export OS_IDENTITY_API_VERSION -OS_PROJECT_DOMAIN_NAME='Default' -export OS_PROJECT_DOMAIN_NAME -OS_USER_DOMAIN_NAME='Default' -export OS_USER_DOMAIN_NAME - -# Add OS_CLOUDNAME to PS1 -if [ -z "${CLOUDPROMPT_ENABLED:-}" ]; then - export PS1=${PS1:-""} - export PS1=\${OS_CLOUDNAME:+"(\$OS_CLOUDNAME)"}\ $PS1 - export CLOUDPROMPT_ENABLED=1 -fi diff --git a/elements/undercloud-install/os-apply-config/root/stackrc.oac b/elements/undercloud-install/os-apply-config/root/stackrc.oac deleted file mode 100644 index f2c9e8a5c..000000000 --- a/elements/undercloud-install/os-apply-config/root/stackrc.oac +++ /dev/null @@ -1 +0,0 @@ -mode: 0600 diff --git a/elements/undercloud-install/os-apply-config/root/tripleo-undercloud-passwords b/elements/undercloud-install/os-apply-config/root/tripleo-undercloud-passwords deleted file mode 100644 index a7cc2c802..000000000 --- a/elements/undercloud-install/os-apply-config/root/tripleo-undercloud-passwords +++ /dev/null @@ -1,24 +0,0 @@ -UNDERCLOUD_ADMIN_PASSWORD=$(sudo hiera admin_password) -UNDERCLOUD_ADMIN_TOKEN=$(sudo hiera keystone::admin_token) -UNDERCLOUD_CEILOMETER_METERING_SECRET=$(sudo hiera ceilometer::metering_secret) -UNDERCLOUD_CEILOMETER_PASSWORD=$(sudo hiera ceilometer::keystone::authtoken::password) -UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD=$(sudo hiera snmpd_readonly_user_password) -UNDERCLOUD_CEILOMETER_SNMPD_USER=$(sudo hiera snmpd_readonly_user_name) -UNDERCLOUD_DB_PASSWORD=$(sudo hiera admin_password) -UNDERCLOUD_GLANCE_PASSWORD=$(sudo hiera glance::api::keystone_password) -UNDERCLOUD_HAPROXY_STATS_PASSWORD=$(sudo hiera tripleo::haproxy::haproxy_stats_password) -UNDERCLOUD_HEAT_ENCRYPTION_KEY=$(sudo hiera heat::engine::auth_encryption_key) -UNDERCLOUD_HEAT_PASSWORD=$(sudo hiera heat::keystone_password) -UNDERCLOUD_HEAT_STACK_DOMAIN_ADMIN_PASSWORD=$(sudo hiera heat_stack_domain_admin_password) -UNDERCLOUD_HORIZON_SECRET_KEY=$(sudo hiera horizon_secret_key) -UNDERCLOUD_IRONIC_PASSWORD=$(sudo hiera ironic::api::authtoken::password) -UNDERCLOUD_NEUTRON_PASSWORD=$(sudo hiera neutron::server::auth_password) -UNDERCLOUD_NOVA_PASSWORD=$(sudo hiera nova::keystone::authtoken::password) -UNDERCLOUD_RABBIT_COOKIE=$(sudo hiera rabbit_cookie) -UNDERCLOUD_RABBIT_PASSWORD=$(sudo hiera rabbit_password) -UNDERCLOUD_RABBIT_USERNAME=$(sudo hiera rabbit_username) -UNDERCLOUD_SWIFT_HASH_SUFFIX=$(sudo hiera swift::swift_hash_suffix) -UNDERCLOUD_SWIFT_PASSWORD=$(sudo hiera swift::proxy::authtoken::admin_password) -UNDERCLOUD_MISTRAL_PASSWORD=$(sudo hiera 
mistral::admin_password) -UNDERCLOUD_ZAQAR_PASSWORD=$(sudo hiera zaqar::keystone::authtoken::password) -UNDERCLOUD_CINDER_PASSWORD=$(sudo hiera cinder::keystone::authtoken::password) diff --git a/elements/undercloud-install/os-apply-config/root/tripleo-undercloud-passwords.oac b/elements/undercloud-install/os-apply-config/root/tripleo-undercloud-passwords.oac deleted file mode 100644 index f2c9e8a5c..000000000 --- a/elements/undercloud-install/os-apply-config/root/tripleo-undercloud-passwords.oac +++ /dev/null @@ -1 +0,0 @@ -mode: 0600 diff --git a/elements/undercloud-install/os-apply-config/var/opt/undercloud-stack/masquerade b/elements/undercloud-install/os-apply-config/var/opt/undercloud-stack/masquerade deleted file mode 100644 index 88e33d969..000000000 --- a/elements/undercloud-install/os-apply-config/var/opt/undercloud-stack/masquerade +++ /dev/null @@ -1,29 +0,0 @@ -# In case this script crashed or was interrupted earlier, flush, unlink and -# delete the temp chain. -IPTCOMMAND=iptables -if [[ {{local-ip}} =~ : ]] ; then - IPTCOMMAND=ip6tables -fi -$IPTCOMMAND -w -t nat -F BOOTSTACK_MASQ_NEW || true -$IPTCOMMAND -w -t nat -D POSTROUTING -j BOOTSTACK_MASQ_NEW || true -$IPTCOMMAND -w -t nat -X BOOTSTACK_MASQ_NEW || true -$IPTCOMMAND -w -t nat -N BOOTSTACK_MASQ_NEW -# Build the chain we want. -{{#masquerade_networks}} -NETWORK={{.}} -NETWORKS={{#masquerade_networks}}{{.}},{{/masquerade_networks}} -# Shell substitution to remove the trailing comma -NETWORKS=${NETWORKS%?} -$IPTCOMMAND -w -t nat -A BOOTSTACK_MASQ_NEW -s $NETWORK -d $NETWORKS -j RETURN -$IPTCOMMAND -w -t nat -A BOOTSTACK_MASQ_NEW -s $NETWORK -j MASQUERADE -{{/masquerade_networks}} -# Link it in. -$IPTCOMMAND -w -t nat -I POSTROUTING -j BOOTSTACK_MASQ_NEW -# Delete the old chain if present. -$IPTCOMMAND -w -t nat -F BOOTSTACK_MASQ || true -$IPTCOMMAND -w -t nat -D POSTROUTING -j BOOTSTACK_MASQ || true -$IPTCOMMAND -w -t nat -X BOOTSTACK_MASQ || true -# Rename the new chain into permanence. -$IPTCOMMAND -w -t nat -E BOOTSTACK_MASQ_NEW BOOTSTACK_MASQ -# Remove the forwarding rule (fixes bug 1183099). -$IPTCOMMAND -w -D FORWARD -j REJECT --reject-with icmp-host-prohibited || true diff --git a/elements/undercloud-install/os-refresh-config/configure.d/30-reload-keepalived b/elements/undercloud-install/os-refresh-config/configure.d/30-reload-keepalived deleted file mode 100755 index 92ced25dd..000000000 --- a/elements/undercloud-install/os-refresh-config/configure.d/30-reload-keepalived +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -eux - -if systemctl is-enabled keepalived; then - # This needs to be run after os-net-config, since os-net-config can - # potentially restart network interfaces, which would affect VIPs - # controlled by keepalived. So don't just move this up without knowing - # the consequences. You have been warned. - systemctl reload keepalived -fi - diff --git a/elements/undercloud-install/os-refresh-config/post-configure.d/80-seedstack-masquerade b/elements/undercloud-install/os-refresh-config/post-configure.d/80-seedstack-masquerade deleted file mode 100755 index e414a927a..000000000 --- a/elements/undercloud-install/os-refresh-config/post-configure.d/80-seedstack-masquerade +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -set -eux - -RULES_SCRIPT=/var/opt/undercloud-stack/masquerade - -. $RULES_SCRIPT - -iptables-save > /etc/sysconfig/iptables - - -# We are specifically running the following commands after the -# iptables rules to ensure the persisted file does not contain any -# ephemeral neutron rules.
Neutron assumes the iptables rules are not -# persisted, so rules loaded on boot (or via an iptables restart) may -# cause issues. If an operator needs to reload iptables for any reason, -# they may need to manually reload the appropriate neutron agent to -# restore these iptables rules. -# https://bugzilla.redhat.com/show_bug.cgi?id=1541528 -if /bin/test -f /etc/sysconfig/iptables && /bin/grep -q neutron- /etc/sysconfig/iptables -then - /bin/sed -i /neutron-/d /etc/sysconfig/iptables -fi - -if /bin/test -f /etc/sysconfig/ip6tables && /bin/grep -q neutron- /etc/sysconfig/ip6tables -then - /bin/sed -i /neutron-/d /etc/sysconfig/ip6tables -fi - - -# Do not persist ephemeral firewall rules managed by ironic-inspector -# pxe_filter 'iptables' driver. -# https://bugs.launchpad.net/tripleo/+bug/1765700 -if /bin/test -f /etc/sysconfig/iptables && /bin/grep -v "\-m comment \--comment" /etc/sysconfig/iptables | /bin/grep -q ironic-inspector -then - /bin/sed -i "/-m comment --comment.*ironic-inspector/p;/ironic-inspector/d" /etc/sysconfig/iptables -fi - -if /bin/test -f /etc/sysconfig/ip6tables && /bin/grep -v "\-m comment \--comment" /etc/sysconfig/ip6tables | /bin/grep -q ironic-inspector -then - /bin/sed -i "/-m comment --comment.*ironic-inspector/p;/ironic-inspector/d" /etc/sysconfig/ip6tables -fi diff --git a/elements/undercloud-install/os-refresh-config/post-configure.d/98-undercloud-setup b/elements/undercloud-install/os-refresh-config/post-configure.d/98-undercloud-setup deleted file mode 100755 index fca53aaad..000000000 --- a/elements/undercloud-install/os-refresh-config/post-configure.d/98-undercloud-setup +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -set -eux - -source /root/tripleo-undercloud-passwords -source /root/stackrc - -INSTACK_ROOT=${INSTACK_ROOT:-""} -export INSTACK_ROOT -if [ -n "$INSTACK_ROOT" ]; then - PATH=$PATH:$INSTACK_ROOT/instack-undercloud/scripts - export PATH -fi - -if [ ! -f /root/.ssh/authorized_keys ]; then - sudo mkdir -p /root/.ssh - sudo chmod 0700 /root/.ssh/ - sudo touch /root/.ssh/authorized_keys - sudo chmod 600 /root/.ssh/authorized_keys -fi - -if [ ! -f /root/.ssh/id_rsa ]; then - ssh-keygen -b 1024 -N '' -f /root/.ssh/id_rsa -fi - -cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys - -if [ -e /usr/sbin/getenforce ]; then - if [ "$(getenforce)" == "Enforcing" ]; then - set +e - selinux_wrong_permission="$(find /root/.ssh/ -exec ls -lZ {} \; | grep -v 'ssh_home_t')" - set -e - if [ -n "${selinux_wrong_permission}" ]; then - semanage fcontext -a -t ssh_home_t '/root/.ssh(/.*)?'
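- # Re-apply the newly registered ssh_home_t context to the files that - # already exist under /root/.ssh: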
- restorecon -R /root/.ssh/ - fi - fi -fi - -# Disable nova quotas -openstack quota set --cores -1 --instances -1 --ram -1 $(openstack project show admin | awk '$2=="id" {print $4}') - -# instack-prepare-for-overcloud -rm -rf $HOME/.novaclient diff --git a/imagefactory/Makefile b/imagefactory/Makefile deleted file mode 100644 index b15757783..000000000 --- a/imagefactory/Makefile +++ /dev/null @@ -1,66 +0,0 @@ -utility-image: - imagefactory --debug base_image \ - --file-parameter install_script \ - utility_image.ks utility_image.tdl - -input-image: - imagefactory --debug base_image \ - --file-parameter install_script \ - input_image.ks input_image.tdl - -overcloud-images: overcloud-control overcloud-compute overcloud-cinder-volume overcloud-swift-storage deploy-ramdisk-ironic discovery-ramdisk - -overcloud-control: - imagefactory --debug \ - target_image \ - --id $(INPUT_IMAGE_ID) \ - --parameter utility_image $(UTILITY_IMAGE_ID) \ - --file-parameter utility_customizations dib_overcloud_control.tdl \ - --parameter results_location "/overcloud-control.tar" indirection - tar -x -f $$(ls -1tr /var/lib/imagefactory/storage/*.body | tail -n 1) - -overcloud-compute: - imagefactory --debug \ - target_image \ - --id $(INPUT_IMAGE_ID) \ - --parameter utility_image $(UTILITY_IMAGE_ID) \ - --file-parameter utility_customizations dib_overcloud_compute.tdl \ - --parameter results_location "/overcloud-compute.tar" indirection - tar -x -f $$(ls -1tr /var/lib/imagefactory/storage/*.body | tail -n 1) - -overcloud-cinder-volume: - imagefactory --debug \ - target_image \ - --id $(INPUT_IMAGE_ID) \ - --parameter utility_image $(UTILITY_IMAGE_ID) \ - --file-parameter utility_customizations dib_overcloud_cinder_volume.tdl \ - --parameter results_location "/overcloud-cinder-volume.tar" indirection - tar -x -f $$(ls -1tr /var/lib/imagefactory/storage/*.body | tail -n 1) - -overcloud-swift-storage: - imagefactory --debug \ - target_image \ - --id $(INPUT_IMAGE_ID) \ - --parameter utility_image $(UTILITY_IMAGE_ID) \ - --file-parameter utility_customizations dib_overcloud_swift_storage.tdl \ - --parameter results_location "/overcloud-swift-storage.tar" indirection - tar -x -f $$(ls -1tr /var/lib/imagefactory/storage/*.body | tail -n 1) - -deploy-ramdisk-ironic: - imagefactory --debug \ - target_image \ - --id $(INPUT_IMAGE_ID) \ - --parameter utility_image $(UTILITY_IMAGE_ID) \ - --file-parameter utility_customizations dib_deploy_ramdisk_ironic.tdl \ - --parameter results_location "/deploy-ramdisk-ironic.tar" indirection - tar -x -f $$(ls -1tr /var/lib/imagefactory/storage/*.body | tail -n 1) - -discovery-ramdisk: - imagefactory --debug \ - target_image \ - --id $(INPUT_IMAGE_ID) \ - --parameter utility_image $(UTILITY_IMAGE_ID) \ - --file-parameter utility_customizations dib_discovery_ramdisk.tdl \ - --parameter results_location "/discovery-ramdisk.tar" indirection - tar -x -f $$(ls -1tr /var/lib/imagefactory/storage/*.body | tail -n 1) - diff --git a/imagefactory/dib_deploy_ramdisk_ironic.tdl b/imagefactory/dib_deploy_ramdisk_ironic.tdl deleted file mode 100644 index 35588cebf..000000000 --- a/imagefactory/dib_deploy_ramdisk_ironic.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/dib_discovery_ramdisk.tdl b/imagefactory/dib_discovery_ramdisk.tdl deleted file mode 100644 index 76fc45383..000000000 --- a/imagefactory/dib_discovery_ramdisk.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/dib_overcloud_cinder_volume.tdl b/imagefactory/dib_overcloud_cinder_volume.tdl deleted file mode 100644 
index 5bd62f6bc..000000000 --- a/imagefactory/dib_overcloud_cinder_volume.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/dib_overcloud_compute.tdl b/imagefactory/dib_overcloud_compute.tdl deleted file mode 100644 index 7d6698657..000000000 --- a/imagefactory/dib_overcloud_compute.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/dib_overcloud_control.tdl b/imagefactory/dib_overcloud_control.tdl deleted file mode 100644 index 03126a738..000000000 --- a/imagefactory/dib_overcloud_control.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/dib_overcloud_swift_storage.tdl b/imagefactory/dib_overcloud_swift_storage.tdl deleted file mode 100644 index 2596bb2e6..000000000 --- a/imagefactory/dib_overcloud_swift_storage.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/input_image.ks b/imagefactory/input_image.ks deleted file mode 100644 index a8c12bb89..000000000 --- a/imagefactory/input_image.ks +++ /dev/null @@ -1,26 +0,0 @@ -url --url=http://download.eng.brq.redhat.com/pub/fedora/releases/20/Fedora/x86_64/os/ -# Without the Everything repo, we cannot install cloud-init -repo --name="fedora-everything" --baseurl=http://download.eng.brq.redhat.com/pub/fedora/releases/20/Everything/x86_64/os/ -install -text -keyboard us -lang en_US.UTF-8 -skipx -network --device eth0 --bootproto dhcp -rootpw ROOTPW -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr --append="console=tty0 console=ttyS0,115200" -zerombr -clearpart --all --drives=vda -part / --fstype="ext4" --size=3000 -reboot - -%packages -@core -cloud-init -tar - -%end diff --git a/imagefactory/input_image.tdl b/imagefactory/input_image.tdl deleted file mode 100644 index 3fa95e1cf..000000000 --- a/imagefactory/input_image.tdl +++ /dev/null @@ -1,12 +0,0 @@ - diff --git a/imagefactory/utility_image.ks b/imagefactory/utility_image.ks deleted file mode 100644 index 95c9de370..000000000 --- a/imagefactory/utility_image.ks +++ /dev/null @@ -1,41 +0,0 @@ -url --url=http://download.eng.brq.redhat.com/pub/fedora/releases/20/Fedora/x86_64/os/ -# Without the Everything repo, we cannot install cloud-init -repo --name="fedora-everything" --baseurl=http://download.eng.brq.redhat.com/pub/fedora/releases/20/Everything/x86_64/os/ -repo --name="updates" --baseurl=http://download.eng.brq.redhat.com/pub/fedora/linux/updates/20/x86_64/ -repo --name=openstack --baseurl=http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20/ - -# Uncomment the following line to use the copr repository -# repo --name=copr-openstack-m --baseurl=http://copr-be.cloud.fedoraproject.org/results/slagle/openstack-m/fedora-$releasever-$basearch/ - -install -text -keyboard us -lang en_US.UTF-8 - -skipx - -network --device eth0 --bootproto dhcp -rootpw ROOTPW -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr --append="console=tty0 console=ttyS0,115200" -zerombr -clearpart --all --drives=vda - -part biosboot --fstype=biosboot --size=1 -part /boot --fstype ext4 --size=200 --ondisk=vda -part pv.2 --size=1 --grow --ondisk=vda -volgroup VolGroup00 --pesize=32768 pv.2 -logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=768 --grow --maxsize=1536 -logvol / --fstype ext4 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow -reboot - -%packages -@core -qemu-img -instack-undercloud -git -%end - diff --git a/imagefactory/utility_image.tdl 
b/imagefactory/utility_image.tdl deleted file mode 100644 index 2ae7064b8..000000000 --- a/imagefactory/utility_image.tdl +++ /dev/null @@ -1,15 +0,0 @@ - diff --git a/instack_undercloud/__init__.py b/instack_undercloud/__init__.py deleted file mode 100644 index d17bb2cee..000000000 --- a/instack_undercloud/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - - -__version__ = pbr.version.VersionInfo('instack_undercloud') diff --git a/instack_undercloud/tests/__init__.py b/instack_undercloud/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/instack_undercloud/tests/test_undercloud.py b/instack_undercloud/tests/test_undercloud.py deleted file mode 100644 index 9ab5e0032..000000000 --- a/instack_undercloud/tests/test_undercloud.py +++ /dev/null @@ -1,1635 +0,0 @@ -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
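- -# These tests exercise the undercloud install/upgrade entry points, -# configuration validation and environment generation in -# instack_undercloud.undercloud.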
- -import collections -import io -import json -import os -import subprocess -import tempfile -import time - -import fixtures -from keystoneauth1 import exceptions as ks_exceptions -import mock -from novaclient import exceptions -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslotest import base -from oslotest import log -from six.moves import configparser - -from instack_undercloud import undercloud -from instack_undercloud import validator - - -undercloud._configure_logging(undercloud.DEFAULT_LOG_LEVEL, None) - - -class BaseTestCase(base.BaseTestCase): - def setUp(self): - super(BaseTestCase, self).setUp() - self.logger = self.useFixture(log.ConfigureLogging()).logger - self.conf = self.useFixture(config_fixture.Config()) - self.conf.config(enable_routed_networks=True) - # ctlplane-subnet - config group options - self.grp0 = cfg.OptGroup(name='ctlplane-subnet', - title='ctlplane-subnet') - self.opts = [cfg.StrOpt('cidr'), - cfg.StrOpt('dhcp_start'), - cfg.StrOpt('dhcp_end'), - cfg.StrOpt('inspection_iprange'), - cfg.StrOpt('gateway'), - cfg.BoolOpt('masquerade')] - self.conf.register_opts(self.opts, group=self.grp0) - self.grp1 = cfg.OptGroup(name='subnet1', title='subnet1') - self.gtp2 = cfg.OptGroup(name='subnet2', title='subnet2') - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', dhcp_end='192.168.24.24', - inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', masquerade=True, - group='ctlplane-subnet') - - -class TestUndercloud(BaseTestCase): - @mock.patch( - 'instack_undercloud.undercloud._load_subnets_config_groups') - @mock.patch('instack_undercloud.undercloud._handle_upgrade_fact') - @mock.patch('instack_undercloud.undercloud._configure_logging') - @mock.patch('instack_undercloud.undercloud._validate_configuration') - @mock.patch('instack_undercloud.undercloud._run_command') - @mock.patch('instack_undercloud.undercloud._post_config') - @mock.patch('instack_undercloud.undercloud._run_orc') - @mock.patch('instack_undercloud.undercloud._run_yum_update') - @mock.patch('instack_undercloud.undercloud._run_yum_clean_all') - @mock.patch('instack_undercloud.undercloud._run_instack') - @mock.patch('instack_undercloud.undercloud._generate_environment') - @mock.patch('instack_undercloud.undercloud._load_config') - @mock.patch('instack_undercloud.undercloud._run_validation_groups') - def test_install(self, mock_run_validation_groups, mock_load_config, - mock_generate_environment, mock_run_instack, - mock_run_clean_all, mock_run_yum_update, mock_run_orc, - mock_post_config, mock_run_command, - mock_validate_configuration, mock_configure_logging, - mock_upgrade_fact, mock_load_subnets_config_groups): - fake_env = mock.MagicMock() - mock_generate_environment.return_value = fake_env - undercloud.install('.') - self.assertTrue(mock_validate_configuration.called) - mock_generate_environment.assert_called_with('.') - mock_run_instack.assert_called_with(fake_env) - mock_run_orc.assert_called_with(fake_env) - mock_run_command.assert_called_with( - ['sudo', 'rm', '-f', '/tmp/svc-map-services'], None, 'rm') - mock_upgrade_fact.assert_called_with(False) - mock_run_validation_groups.assert_not_called() - - @mock.patch( - 'instack_undercloud.undercloud._load_subnets_config_groups') - @mock.patch('instack_undercloud.undercloud._handle_upgrade_fact') - @mock.patch('instack_undercloud.undercloud._configure_logging') - @mock.patch('instack_undercloud.undercloud._validate_configuration') - 
@mock.patch('instack_undercloud.undercloud._run_command') - @mock.patch('instack_undercloud.undercloud._post_config') - @mock.patch('instack_undercloud.undercloud._run_orc') - @mock.patch('instack_undercloud.undercloud._run_yum_update') - @mock.patch('instack_undercloud.undercloud._run_yum_clean_all') - @mock.patch('instack_undercloud.undercloud._run_instack') - @mock.patch('instack_undercloud.undercloud._generate_environment') - @mock.patch('instack_undercloud.undercloud._load_config') - @mock.patch('instack_undercloud.undercloud._run_validation_groups') - def test_install_upgrade(self, mock_run_validation_groups, - mock_load_config, - mock_generate_environment, - mock_run_instack, - mock_run_yum_clean_all, - mock_run_yum_update, - mock_run_orc, - mock_post_config, - mock_run_command, - mock_validate_configuration, - mock_configure_logging, - mock_upgrade_fact, - mock_load_subnets_config_groups): - fake_env = mock.MagicMock() - mock_generate_environment.return_value = fake_env - undercloud.install('.', upgrade=True) - self.assertTrue(mock_validate_configuration.called) - mock_generate_environment.assert_called_with('.') - mock_run_instack.assert_called_with(fake_env) - mock_run_orc.assert_called_with(fake_env) - mock_run_command.assert_called_with( - ['sudo', 'rm', '-f', '/tmp/svc-map-services'], None, 'rm') - mock_upgrade_fact.assert_called_with(True) - mock_run_validation_groups.assert_called_once() - - @mock.patch( - 'instack_undercloud.undercloud._load_subnets_config_groups') - @mock.patch('instack_undercloud.undercloud._handle_upgrade_fact') - @mock.patch('instack_undercloud.undercloud._configure_logging') - @mock.patch('instack_undercloud.undercloud._validate_configuration') - @mock.patch('instack_undercloud.undercloud._run_command') - @mock.patch('instack_undercloud.undercloud._post_config') - @mock.patch('instack_undercloud.undercloud._run_orc') - @mock.patch('instack_undercloud.undercloud._run_yum_update') - @mock.patch('instack_undercloud.undercloud._run_yum_clean_all') - @mock.patch('instack_undercloud.undercloud._run_instack') - @mock.patch('instack_undercloud.undercloud._generate_environment') - @mock.patch('instack_undercloud.undercloud._load_config') - @mock.patch('instack_undercloud.undercloud._run_validation_groups') - def test_install_upgrade_hieradata(self, mock_run_validation_groups, - mock_load_config, - mock_generate_environment, - mock_run_instack, - mock_run_yum_clean_all, - mock_run_yum_update, mock_run_orc, - mock_post_config, mock_run_command, - mock_validate_configuration, - mock_configure_logging, - mock_upgrade_fact, - mock_load_subnets_config_groups): - self.conf.config(hieradata_override='override.yaml') - with open(os.path.expanduser('~/override.yaml'), 'w') as f: - f.write('Something\n') - fake_env = mock.MagicMock() - mock_generate_environment.return_value = fake_env - undercloud.install('.', upgrade=True) - self.assertTrue(mock_validate_configuration.called) - mock_generate_environment.assert_called_with('.') - mock_run_instack.assert_called_with(fake_env) - mock_run_orc.assert_called_with(fake_env) - mock_run_command.assert_called_with( - ['sudo', 'rm', '-f', '/tmp/svc-map-services'], None, 'rm') - self.assertNotIn( - mock.call( - ['sudo', 'cp', 'override.yaml', - '/etc/puppet/hieradata/override.yaml']), - mock_run_command.mock_calls) - mock_upgrade_fact.assert_called_with(True) - mock_run_validation_groups.assert_called_once() - - @mock.patch('instack_undercloud.undercloud._configure_logging') - def test_install_exception(self, 
mock_configure_logging): - mock_configure_logging.side_effect = RuntimeError('foo') - self.assertRaises(RuntimeError, undercloud.install, '.') - log_dict = {'undercloud_operation': "install", - 'exception': 'foo', - 'log_file': undercloud.PATHS.LOG_FILE - } - self.assertIn(undercloud.FAILURE_MESSAGE % log_dict, - self.logger.output) - - @mock.patch('sys.exit') - @mock.patch('instack_undercloud.undercloud._configure_logging') - def test_install_exception_no_debug(self, mock_configure_logging, - mock_exit): - mock_configure_logging.side_effect = RuntimeError('foo') - self.conf.config(undercloud_debug=False) - undercloud.install('.') - log_dict = {'undercloud_operation': "install", - 'exception': 'foo', - 'log_file': undercloud.PATHS.LOG_FILE - } - self.assertIn(undercloud.FAILURE_MESSAGE % log_dict, - self.logger.output) - mock_exit.assert_called_with(1) - - def test_generate_password(self): - first = undercloud._generate_password() - second = undercloud._generate_password() - self.assertNotEqual(first, second) - - def test_extract_from_stackrc(self): - with open(os.path.expanduser('~/stackrc'), 'w') as f: - f.write('OS_USERNAME=aturing\n') - f.write('OS_AUTH_URL=https://bletchley:5000/\n') - self.assertEqual('aturing', - undercloud._extract_from_stackrc('OS_USERNAME')) - self.assertEqual('https://bletchley:5000/', - undercloud._extract_from_stackrc('OS_AUTH_URL')) - - @mock.patch('instack_undercloud.undercloud._check_hostname') - @mock.patch('instack_undercloud.undercloud._check_memory') - @mock.patch('instack_undercloud.undercloud._check_sysctl') - @mock.patch('instack_undercloud.undercloud._validate_network') - @mock.patch('instack_undercloud.undercloud._validate_no_ip_change') - @mock.patch('instack_undercloud.undercloud._validate_passwords_file') - def test_validate_configuration(self, mock_vpf, mock_vnic, - mock_validate_network, - mock_check_memory, mock_check_hostname, - mock_check_sysctl): - undercloud._validate_configuration() - self.assertTrue(mock_vpf.called) - self.assertTrue(mock_vnic.called) - self.assertTrue(mock_validate_network.called) - self.assertTrue(mock_check_memory.called) - self.assertTrue(mock_check_hostname.called) - self.assertTrue(mock_check_sysctl.called) - - -class TestCheckHostname(BaseTestCase): - @mock.patch('instack_undercloud.undercloud._run_command') - def test_correct(self, mock_run_command): - mock_run_command.side_effect = ['test-hostname', 'test-hostname'] - self.useFixture(fixtures.EnvironmentVariable('HOSTNAME', - 'test-hostname')) - fake_hosts = io.StringIO(u'127.0.0.1 test-hostname\n') - with mock.patch('instack_undercloud.undercloud.open', - return_value=fake_hosts, create=True): - undercloud._check_hostname() - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_static_transient_mismatch(self, mock_run_command): - mock_run_command.side_effect = ['test-hostname', 'other-hostname'] - self.useFixture(fixtures.EnvironmentVariable('HOSTNAME', - 'test-hostname')) - fake_hosts = io.StringIO(u'127.0.0.1 test-hostname\n') - with mock.patch('instack_undercloud.undercloud.open', - return_value=fake_hosts, create=True): - self.assertRaises(RuntimeError, undercloud._check_hostname) - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_no_substring_match(self, mock_run_command): - mock_run_command.side_effect = ['test.hostname', 'test.hostname', - None] - self.useFixture(fixtures.EnvironmentVariable('HOSTNAME', - 'test.hostname')) - fake_hosts = io.StringIO(u'127.0.0.1 test-hostname-bad\n') - with 
mock.patch('instack_undercloud.undercloud.open', - return_value=fake_hosts, create=True): - undercloud._check_hostname() - mock_run_command.assert_called_with([ - 'sudo', '/bin/bash', '-c', - 'sed -i "s/127.0.0.1\(\s*\)/127.0.0.1\\1test.hostname test /" ' - '/etc/hosts'], - name='hostname-to-etc-hosts') - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_commented(self, mock_run_command): - mock_run_command.side_effect = ['test.hostname', 'test.hostname', - None] - self.useFixture(fixtures.EnvironmentVariable('HOSTNAME', - 'test.hostname')) - fake_hosts = io.StringIO(u""" #127.0.0.1 test.hostname\n - 127.0.0.1 other-hostname\n""") - with mock.patch('instack_undercloud.undercloud.open', - return_value=fake_hosts, create=True): - undercloud._check_hostname() - mock_run_command.assert_called_with([ - 'sudo', '/bin/bash', '-c', - 'sed -i "s/127.0.0.1\(\s*\)/127.0.0.1\\1test.hostname test /" ' - '/etc/hosts'], - name='hostname-to-etc-hosts') - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_set_fqdn(self, mock_run_command): - mock_run_command.side_effect = [None, - 'test-hostname.domain', - 'test-hostname.domain', - None] - self.conf.config(undercloud_hostname='test-hostname.domain') - fake_hosts = io.StringIO(u'127.0.0.1 other-hostname\n') - with mock.patch('instack_undercloud.undercloud.open', - return_value=fake_hosts, create=True): - undercloud._check_hostname() - mock_run_command.assert_called_with([ - 'sudo', '/bin/bash', '-c', - 'sed -i "s/127.0.0.1\(\s*\)/' - '127.0.0.1\\1test-hostname.domain test-hostname /" ' - '/etc/hosts'], - name='hostname-to-etc-hosts') - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_set_not_fq(self, mock_run_command): - mock_run_command.side_effect = [None, - 'test-hostname', - 'test-hostname', - None] - self.conf.config(undercloud_hostname='test-hostname') - self.assertRaises(RuntimeError, undercloud._check_hostname) - - -class TestCheckMemory(BaseTestCase): - @mock.patch('psutil.swap_memory') - @mock.patch('psutil.virtual_memory') - def test_sufficient_memory(self, mock_vm, mock_sm): - mock_vm.return_value = mock.Mock() - mock_vm.return_value.total = 8589934592 - mock_sm.return_value = mock.Mock() - mock_sm.return_value.total = 0 - undercloud._check_memory() - - @mock.patch('psutil.swap_memory') - @mock.patch('psutil.virtual_memory') - def test_insufficient_memory(self, mock_vm, mock_sm): - mock_vm.return_value = mock.Mock() - mock_vm.return_value.total = 2071963648 - mock_sm.return_value = mock.Mock() - mock_sm.return_value.total = 0 - self.assertRaises(RuntimeError, undercloud._check_memory) - - @mock.patch('psutil.swap_memory') - @mock.patch('psutil.virtual_memory') - def test_sufficient_swap(self, mock_vm, mock_sm): - mock_vm.return_value = mock.Mock() - mock_vm.return_value.total = 6442450944 - mock_sm.return_value = mock.Mock() - mock_sm.return_value.total = 2147483648 - undercloud._check_memory() - - -class TestCheckSysctl(BaseTestCase): - @mock.patch('os.path.isfile') - def test_missing_options(self, mock_isfile): - mock_isfile.return_value = False - self.assertRaises(RuntimeError, undercloud._check_sysctl) - - @mock.patch('os.path.isfile') - def test_available_option(self, mock_isfile): - mock_isfile.return_value = True - undercloud._check_sysctl() - - -class TestNoIPChange(BaseTestCase): - @mock.patch('os.path.isfile', return_value=False) - def test_new_install(self, mock_isfile): - undercloud._validate_no_ip_change() - - @mock.patch('instack_undercloud.undercloud.open') - 
@mock.patch('json.loads') - @mock.patch('os.path.isfile', return_value=True) - def test_update_matches(self, mock_isfile, mock_loads, mock_open): - mock_members = [{'name': 'eth0'}, - {'name': 'br-ctlplane', - 'addresses': [{'ip_netmask': '192.168.24.1/24'}] - } - ] - mock_config = {'network_config': mock_members} - mock_loads.return_value = mock_config - undercloud._validate_no_ip_change() - - @mock.patch('instack_undercloud.undercloud.open') - @mock.patch('os.path.isfile', return_value=True) - def test_update_empty(self, mock_isfile, mock_open): - # This would be a way to disable os-net-config from running - mock_open.side_effect = [ - mock.mock_open(read_data='').return_value, - ] - undercloud._validate_no_ip_change() - - @mock.patch('instack_undercloud.undercloud.open') - @mock.patch('json.loads') - @mock.patch('os.path.isfile', return_value=True) - def test_update_mismatch(self, mock_isfile, mock_loads, mock_open): - mock_members = [{'name': 'eth0'}, - {'name': 'br-ctlplane', - 'addresses': [{'ip_netmask': '192.168.0.1/24'}] - } - ] - mock_config = {'network_config': mock_members} - mock_loads.return_value = mock_config - self.assertRaises(validator.FailedValidation, - undercloud._validate_no_ip_change) - - @mock.patch('instack_undercloud.undercloud.open') - @mock.patch('json.loads') - @mock.patch('os.path.isfile', return_value=True) - def test_update_no_network(self, mock_isfile, mock_loads, mock_open): - mock_members = [{'name': 'eth0'}] - mock_config = {'network_config': mock_members} - mock_loads.return_value = mock_config - undercloud._validate_no_ip_change() - - -@mock.patch('os.path.isfile') -class TestPasswordsFileExists(BaseTestCase): - def test_new_install(self, mock_isfile): - mock_isfile.side_effect = [False] - undercloud._validate_passwords_file() - - def test_update_exists(self, mock_isfile): - mock_isfile.side_effect = [True, True] - undercloud._validate_passwords_file() - - def test_update_missing(self, mock_isfile): - mock_isfile.side_effect = [True, False] - self.assertRaises(validator.FailedValidation, - undercloud._validate_passwords_file) - - -class TestGenerateEnvironment(BaseTestCase): - def setUp(self): - super(TestGenerateEnvironment, self).setUp() - # Things that need to always be mocked out, but that the tests - # don't want to care about. - self.useFixture(fixtures.MockPatch( - 'instack_undercloud.undercloud._write_password_file')) - self.useFixture(fixtures.MockPatch( - 'instack_undercloud.undercloud._load_config')) - mock_isdir = fixtures.MockPatch('os.path.isdir') - self.useFixture(mock_isdir) - mock_isdir.mock.return_value = False - # Some tests do care about this, but they can override the default - # return value, and then the tests that don't care can ignore it. 
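- # platform.linux_distribution() drives NODE_DIST detection, so pin it - # to a known RHEL 7 value here; individual tests override it as needed.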
- self.mock_distro = fixtures.MockPatch('platform.linux_distribution') - self.useFixture(self.mock_distro) - self.mock_distro.mock.return_value = [ - 'Red Hat Enterprise Linux Server 7.1'] - - @mock.patch('socket.gethostname') - def test_hostname_set(self, mock_gethostname): - fake_hostname = 'crazy-test-hostname-!@#$%12345' - mock_gethostname.return_value = fake_hostname - env = undercloud._generate_environment('.') - self.assertEqual(fake_hostname, env['HOSTNAME']) - - def test_elements_path_input(self): - test_path = '/test/elements/path' - self.useFixture(fixtures.EnvironmentVariable('ELEMENTS_PATH', - test_path)) - env = undercloud._generate_environment('.') - self.assertEqual(test_path, env['ELEMENTS_PATH']) - - def test_default_elements_path(self): - env = undercloud._generate_environment('.') - test_path = ('%s:%s:/usr/share/tripleo-image-elements:' - '/usr/share/diskimage-builder/elements' % - (os.path.join(os.getcwd(), 'tripleo-puppet-elements', - 'elements'), - './elements')) - self.assertEqual(test_path, env['ELEMENTS_PATH']) - - def test_rhel7_distro(self): - self.useFixture(fixtures.EnvironmentVariable('NODE_DIST', None)) - env = undercloud._generate_environment('.') - self.assertEqual('rhel7', env['NODE_DIST']) - self.assertEqual('./json-files/rhel-7-undercloud-packages.json', - env['JSONFILE']) - self.assertEqual('disable', env['REG_METHOD']) - self.assertEqual('1', env['REG_HALT_UNREGISTER']) - - def test_centos7_distro(self): - self.useFixture(fixtures.EnvironmentVariable('NODE_DIST', None)) - self.mock_distro.mock.return_value = ['CentOS Linux release 7.1'] - env = undercloud._generate_environment('.') - self.assertEqual('centos7', env['NODE_DIST']) - self.assertEqual('./json-files/centos-7-undercloud-packages.json', - env['JSONFILE']) - - def test_fedora_distro(self): - self.useFixture(fixtures.EnvironmentVariable('NODE_DIST', None)) - self.mock_distro.mock.return_value = ['Fedora Infinity + 1'] - self.assertRaises(RuntimeError, undercloud._generate_environment, '.') - - def test_other_distro(self): - self.useFixture(fixtures.EnvironmentVariable('NODE_DIST', None)) - self.mock_distro.mock.return_value = ['Gentoo'] - self.assertRaises(RuntimeError, undercloud._generate_environment, '.') - - def test_opts_in_env(self): - env = undercloud._generate_environment('.') - # Just spot check, we don't want to replicate the entire opt list here - self.assertEqual(env['INSPECTION_COLLECTORS'], - 'default,extra-hardware,numa-topology,logs') - self.assertEqual('192.168.24.1/24', env['PUBLIC_INTERFACE_IP']) - self.assertEqual('192.168.24.1', env['LOCAL_IP']) - # The list is generated from a set, so we can't rely on ordering. - # Instead make sure that it looks like a valid list by parsing it. 
- hw_types = json.loads(env['ENABLED_HARDWARE_TYPES']) - self.assertEqual(sorted(hw_types), ['idrac', 'ilo', 'ipmi', 'redfish']) - self.assertEqual( - sorted(json.loads(env['ENABLED_BOOT_INTERFACES'])), - ['ilo-pxe', 'pxe']) - self.assertEqual( - sorted(json.loads(env['ENABLED_POWER_INTERFACES'])), - ['fake', 'idrac', 'ilo', 'ipmitool', 'redfish']) - self.assertEqual( - sorted(json.loads(env['ENABLED_MANAGEMENT_INTERFACES'])), - ['fake', 'idrac', 'ilo', 'ipmitool', 'redfish']) - self.assertEqual( - sorted(json.loads(env['ENABLED_RAID_INTERFACES'])), - ['idrac', 'no-raid']) - self.assertEqual( - sorted(json.loads(env['ENABLED_VENDOR_INTERFACES'])), - ['idrac', 'ipmitool', 'no-vendor']) - self.assertEqual(env['INSPECTION_NODE_NOT_FOUND_HOOK'], '') - - def test_all_hardware_types(self): - self.conf.config(enabled_hardware_types=['ipmi', 'redfish', 'ilo', - 'idrac', 'irmc', 'snmp', - 'cisco-ucs-managed', - 'cisco-ucs-standalone']) - env = undercloud._generate_environment('.') - # The list is generated from a set, so we can't rely on ordering. - # Instead make sure that it looks like a valid list by parsing it. - hw_types = json.loads(env['ENABLED_HARDWARE_TYPES']) - self.assertEqual(sorted(hw_types), ['cisco-ucs-managed', - 'cisco-ucs-standalone', - 'idrac', 'ilo', 'ipmi', 'irmc', - 'redfish', 'snmp']) - self.assertEqual( - sorted(json.loads(env['ENABLED_BOOT_INTERFACES'])), - ['ilo-pxe', 'irmc-pxe', 'pxe']) - self.assertEqual( - sorted(json.loads(env['ENABLED_POWER_INTERFACES'])), - ['cimc', 'fake', 'idrac', 'ilo', 'ipmitool', 'irmc', - 'redfish', 'snmp', 'ucsm']) - self.assertEqual( - sorted(json.loads(env['ENABLED_MANAGEMENT_INTERFACES'])), - ['cimc', 'fake', 'idrac', 'ilo', 'ipmitool', 'irmc', - 'redfish', 'ucsm']) - self.assertEqual( - sorted(json.loads(env['ENABLED_RAID_INTERFACES'])), - ['idrac', 'no-raid']) - self.assertEqual( - sorted(json.loads(env['ENABLED_VENDOR_INTERFACES'])), - ['idrac', 'ipmitool', 'no-vendor']) - - def test_enabled_discovery(self): - self.conf.config(enable_node_discovery=True, - discovery_default_driver='foobar', - enabled_hardware_types=['ipmi', 'something']) - env = undercloud._generate_environment('.') - # The list is generated from a set, so we can't rely on ordering. - # Instead make sure that it looks like a valid list by parsing it. 
- hw_types = json.loads(env['ENABLED_HARDWARE_TYPES']) - self.assertEqual(sorted(hw_types), ['foobar', 'ipmi', 'something']) - - def test_docker_registry_mirror(self): - self.conf.config(docker_registry_mirror='http://foo/bar') - env = undercloud._generate_environment('.') - # Spot check one service - self.assertEqual('http://foo/bar', - env['DOCKER_REGISTRY_MIRROR']) - - def test_docker_insecure_registries(self): - self.conf.config(docker_insecure_registries=['http://foo/bar:8787']) - env = undercloud._generate_environment('.') - insecure_registries = json.loads(env['DOCKER_INSECURE_REGISTRIES']) - # Spot check one service - self.assertEqual(['http://foo/bar:8787'], insecure_registries) - - def test_generate_endpoints(self): - env = undercloud._generate_environment('.') - endpoint_vars = {k: v for (k, v) in env.items() - if k.startswith('UNDERCLOUD_ENDPOINT')} - self.assertEqual(96, len(endpoint_vars)) - # Spot check one service - self.assertEqual('https://192.168.24.2:13000', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC']) - self.assertEqual('http://192.168.24.3:5000', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_INTERNAL']) - self.assertEqual('http://192.168.24.3:35357', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_ADMIN']) - # Also check that the tenant id part is preserved - self.assertEqual('https://192.168.24.2:13808/v1/AUTH_%(tenant_id)s', - env['UNDERCLOUD_ENDPOINT_SWIFT_PUBLIC']) - - def test_generate_endpoints_ssl_manual(self): - self.conf.config(undercloud_service_certificate='test.pem') - env = undercloud._generate_environment('.') - # Spot check one service - self.assertEqual('https://192.168.24.2:13000', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC']) - self.assertEqual('http://192.168.24.3:5000', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_INTERNAL']) - self.assertEqual('http://192.168.24.3:35357', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_ADMIN']) - self.assertEqual('https://192.168.24.2:443/keystone/v3', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_UI_CONFIG_PUBLIC']) - # Also check that the tenant id part is preserved - self.assertEqual('https://192.168.24.2:13808/v1/AUTH_%(tenant_id)s', - env['UNDERCLOUD_ENDPOINT_SWIFT_PUBLIC']) - - def test_generate_endpoints_ssl_off(self): - self.conf.config(generate_service_certificate=False) - env = undercloud._generate_environment('.') - # Spot check one service - self.assertEqual('http://192.168.24.1:5000', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_PUBLIC']) - self.assertEqual('http://192.168.24.1:5000', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_INTERNAL']) - self.assertEqual('http://192.168.24.1:35357', - env['UNDERCLOUD_ENDPOINT_KEYSTONE_ADMIN']) - # Also check that the tenant id part is preserved - self.assertEqual('http://192.168.24.1:8080/v1/AUTH_%(tenant_id)s', - env['UNDERCLOUD_ENDPOINT_SWIFT_PUBLIC']) - - def test_absolute_cert_path(self): - self.conf.config(undercloud_service_certificate='/home/stack/test.pem') - env = undercloud._generate_environment('.') - self.assertEqual('/home/stack/test.pem', - env['UNDERCLOUD_SERVICE_CERTIFICATE']) - - def test_relative_cert_path(self): - [cert] = self.create_tempfiles([('test', 'foo')], '.pem') - rel_cert = os.path.basename(cert) - cert_path = os.path.dirname(cert) - cur_dir = os.getcwd() - try: - os.chdir(cert_path) - self.conf.config(undercloud_service_certificate=rel_cert) - env = undercloud._generate_environment('.') - self.assertEqual(os.path.join(os.getcwd(), rel_cert), - env['UNDERCLOUD_SERVICE_CERTIFICATE']) - finally: - os.chdir(cur_dir) - - def test_no_cert_path(self): - env = undercloud._generate_environment('.') - 
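- # the auto-generated certificate is named after the undercloud public - # IP (192.168.24.2 by default)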
self.assertEqual('/etc/pki/tls/certs/undercloud-192.168.24.2.pem', - env['UNDERCLOUD_SERVICE_CERTIFICATE']) - - def test_no_ssl(self): - self.conf.config(generate_service_certificate=False) - env = undercloud._generate_environment('.') - self.assertEqual('', env['UNDERCLOUD_SERVICE_CERTIFICATE']) - - def test_remove_dib_yum_repo_conf(self): - self.useFixture(fixtures.EnvironmentVariable('DIB_YUM_REPO_CONF', - 'rum_yepo.conf')) - env = undercloud._generate_environment('.') - self.assertNotIn('DIB_YUM_REPO_CONF', env) - - def test_inspection_ip_single_subnet(self): - env = undercloud._generate_environment('.') - reference = [{"tag": "ctlplane-subnet", "gateway": "192.168.24.1", - "ip_range": "192.168.24.100,192.168.24.120", - "netmask": "255.255.255.0"}] - actual = json.loads(env['INSPECTION_SUBNETS']) - self.assertEqual(reference, actual) - - def test_inspection_ip_multiple_subnets(self): - self.conf.config(subnets=['subnet1', 'subnet2']) - self.conf.config(local_subnet='subnet1') - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.gtp2) - self.conf.config(cidr='192.168.10.0/24', dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', masquerade=True, - group='subnet1') - self.conf.config(cidr='192.168.20.0/24', dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', masquerade=True, - group='subnet2') - env = undercloud._generate_environment('.') - reference = [{"tag": "subnet1", "gateway": "192.168.10.254", - "ip_range": "192.168.10.100,192.168.10.189", - "netmask": "255.255.255.0"}, - {"tag": "subnet2", "gateway": "192.168.20.254", - "ip_range": "192.168.20.100,192.168.20.189", - "netmask": "255.255.255.0"}] - actual = json.loads(env['INSPECTION_SUBNETS']) - self.assertEqual(reference, actual) - - def test_subnets_static_routes(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.gtp2) - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', dhcp_end='192.168.24.24', - inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', masquerade=True, - group='ctlplane-subnet') - self.conf.config(cidr='192.168.10.0/24', dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', masquerade=True, - group='subnet1') - self.conf.config(cidr='192.168.20.0/24', dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', masquerade=True, - group='subnet2') - env = undercloud._generate_environment('.') - reference = [{"ip_netmask": "192.168.10.0/24", - "next_hop": "192.168.24.1"}, - {"ip_netmask": "192.168.20.0/24", - "next_hop": "192.168.24.1"}] - actual = json.loads(env['SUBNETS_STATIC_ROUTES']) - self.assertEqual(reference, actual) - - def test_subnets_subnets_cidr_nat_rules(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.gtp2) - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', dhcp_end='192.168.24.24', - inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', group='ctlplane-subnet') -
self.conf.config(cidr='192.168.10.0/24', dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', group='subnet1') - self.conf.config(cidr='192.168.20.0/24', dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', group='subnet2') - - env = undercloud._generate_environment('.') - reference = ('"140 destination ctlplane-subnet cidr nat": ' - '{"chain": "FORWARD", "destination": "192.168.24.0/24", ' - '"proto": "all", "action": "accept"}' - '\n "140 source ctlplane-subnet cidr nat": ' - '{"chain": "FORWARD", "source": "192.168.24.0/24", ' - '"proto": "all", "action": "accept"}' - '\n "140 destination subnet1 cidr nat": ' - '{"chain": "FORWARD", "destination": "192.168.10.0/24", ' - '"proto": "all", "action": "accept"}' - '\n "140 source subnet1 cidr nat": ' - '{"chain": "FORWARD", "source": "192.168.10.0/24", ' - '"proto": "all", "action": "accept"}' - '\n "140 destination subnet2 cidr nat": ' - '{"chain": "FORWARD", "destination": "192.168.20.0/24", ' - '"proto": "all", "action": "accept"}' - '\n "140 source subnet2 cidr nat": ' - '{"chain": "FORWARD", "source": "192.168.20.0/24", ' - '"proto": "all", "action": "accept"}') - actual = env['SUBNETS_CIDR_NAT_RULES'] - self.assertEqual(reference, actual) - - def test_masquerade_networks(self): - self.conf.config(subnets=['ctlplane-subnet', 'subnet1', 'subnet2']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.register_opts(self.opts, group=self.gtp2) - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', dhcp_end='192.168.24.24', - inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', masquerade=True, - group='ctlplane-subnet') - self.conf.config(cidr='192.168.10.0/24', dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', masquerade=True, - group='subnet1') - self.conf.config(cidr='192.168.20.0/24', dhcp_start='192.168.20.10', - dhcp_end='192.168.20.99', - inspection_iprange='192.168.20.100,192.168.20.189', - gateway='192.168.20.254', masquerade=True, - group='subnet2') - - env = undercloud._generate_environment('.') - reference = ['192.168.24.0/24', '192.168.10.0/24', '192.168.20.0/24'] - actual = json.loads(env['MASQUERADE_NETWORKS']) - self.assertEqual(reference, actual) - - -class TestWritePasswordFile(BaseTestCase): - def test_normal(self): - instack_env = {} - undercloud._write_password_file(instack_env) - test_parser = configparser.ConfigParser() - test_parser.read(undercloud.PATHS.PASSWORD_PATH) - self.assertTrue(test_parser.has_option('auth', - 'undercloud_db_password')) - self.assertIn('UNDERCLOUD_DB_PASSWORD', instack_env) - self.assertEqual(32, - len(instack_env['UNDERCLOUD_HEAT_ENCRYPTION_KEY'])) - - def test_value_set(self): - instack_env = {} - self.conf.config(undercloud_db_password='test', group='auth') - undercloud._write_password_file(instack_env) - test_parser = configparser.ConfigParser() - test_parser.read(undercloud.PATHS.PASSWORD_PATH) - self.assertEqual(test_parser.get('auth', 'undercloud_db_password'), - 'test') - self.assertEqual(instack_env['UNDERCLOUD_DB_PASSWORD'], 'test') - - -class TestRunCommand(BaseTestCase): - def test_run_command(self): - output = undercloud._run_command(['echo', 'foo']) - self.assertEqual('foo\n', output) - - def test_run_live_command(self): - undercloud._run_live_command(['echo', 'bar']) - 
self.assertIn('bar\n', self.logger.output) - - @mock.patch('subprocess.check_output') - def test_run_command_fails(self, mock_check_output): - fake_exc = subprocess.CalledProcessError(1, 'nothing', 'fake failure') - mock_check_output.side_effect = fake_exc - self.assertRaises(subprocess.CalledProcessError, - undercloud._run_command, ['nothing']) - self.assertIn('nothing failed', self.logger.output) - self.assertIn('fake failure', self.logger.output) - - @mock.patch('subprocess.check_output') - def test_run_command_fails_with_name(self, mock_check_output): - fake_exc = subprocess.CalledProcessError(1, 'nothing', 'fake failure') - mock_check_output.side_effect = fake_exc - self.assertRaises(subprocess.CalledProcessError, - undercloud._run_command, ['nothing'], - name='fake_name') - self.assertIn('fake_name failed', self.logger.output) - self.assertIn('fake failure', self.logger.output) - - def test_run_live_command_fails(self): - exc = self.assertRaises(RuntimeError, undercloud._run_live_command, - ['ls', '/nonexistent/path']) - self.assertIn('ls failed', str(exc)) - self.assertIn('ls', self.logger.output) - - def test_run_live_command_fails_name(self): - exc = self.assertRaises(RuntimeError, undercloud._run_live_command, - ['ls', '/nonexistent/path'], - name='fake_name') - self.assertIn('fake_name failed', str(exc)) - - def test_run_command_env(self): - env = {'FOO': 'foo'} - output = undercloud._run_command(['env'], env) - self.assertIn('FOO=foo', output) - - def test_run_live_command_env(self): - env = {'BAR': 'bar'} - undercloud._run_live_command(['env'], env) - self.assertIn('BAR=bar', self.logger.output) - - -class TestRunTools(base.BaseTestCase): - @mock.patch('instack_undercloud.undercloud._run_live_command') - def test_run_instack(self, mock_run): - instack_env = {'ELEMENTS_PATH': '.', 'JSONFILE': 'file.json'} - args = ['sudo', '-E', 'instack', '-p', '.', '-j', 'file.json'] - undercloud._run_instack(instack_env) - mock_run.assert_called_with(args, instack_env, 'instack') - - @mock.patch('instack_undercloud.undercloud._run_live_command') - def test_run_os_refresh_config(self, mock_run): - instack_env = {} - args = ['sudo', 'os-refresh-config'] - undercloud._run_orc(instack_env) - mock_run.assert_called_with(args, instack_env, 'os-refresh-config') - - -@mock.patch('instack_undercloud.undercloud._run_command') -class TestConfigureSshKeys(base.BaseTestCase): - def test_ensure_user_identity(self, mock_run): - id_path = os.path.expanduser('~/.ssh/id_rsa') - undercloud._ensure_user_identity(id_path) - mock_run.assert_called_with(['ssh-keygen', '-t', 'rsa', '-N', '', - '-f', id_path]) - - def _create_test_id(self): - id_path = os.path.expanduser('~/.ssh/id_rsa') - os.makedirs(os.path.expanduser('~/.ssh')) - with open(id_path, 'w') as id_rsa: - id_rsa.write('test private\n') - with open(id_path + '.pub', 'w') as id_pub: - id_pub.write('test public\n') - return id_path - - def test_ensure_user_identity_exists(self, mock_run): - id_path = self._create_test_id() - undercloud._ensure_user_identity(id_path) - self.assertFalse(mock_run.called) - - def _test_configure_ssh_keys(self, mock_eui, exists=True): - id_path = self._create_test_id() - mock_client_instance = mock.Mock() - if not exists: - get = mock_client_instance.keypairs.get - get.side_effect = exceptions.NotFound('test') - undercloud._configure_ssh_keys(mock_client_instance) - mock_eui.assert_called_with(id_path) - mock_client_instance.keypairs.get.assert_called_with('default') - if not exists: - 
mock_client_instance.keypairs.create.assert_called_with( - 'default', 'test public') - - @mock.patch('instack_undercloud.undercloud._ensure_user_identity') - def test_configure_ssh_keys_exists(self, mock_eui, _): - self._test_configure_ssh_keys(mock_eui) - - @mock.patch('instack_undercloud.undercloud._ensure_user_identity') - def test_configure_ssh_keys_missing(self, mock_eui, _): - self._test_configure_ssh_keys(mock_eui, False) - - -class TestPostConfig(BaseTestCase): - @mock.patch('os_client_config.make_client') - @mock.patch('instack_undercloud.undercloud._migrate_to_convergence') - @mock.patch('instack_undercloud.undercloud._ensure_node_resource_classes') - @mock.patch( - 'instack_undercloud.undercloud._config_neutron_segments_and_subnets') - @mock.patch('instack_undercloud.undercloud._ensure_neutron_network') - @mock.patch('instack_undercloud.undercloud._member_role_exists') - @mock.patch('instack_undercloud.undercloud._get_session') - @mock.patch('ironicclient.client.get_client', autospec=True) - @mock.patch('novaclient.client.Client', autospec=True) - @mock.patch('swiftclient.client.Connection', autospec=True) - @mock.patch('mistralclient.api.client.client', autospec=True) - @mock.patch('instack_undercloud.undercloud._delete_default_flavors') - @mock.patch('instack_undercloud.undercloud._copy_stackrc') - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('instack_undercloud.undercloud._configure_ssh_keys') - @mock.patch('instack_undercloud.undercloud._ensure_flavor') - @mock.patch('instack_undercloud.undercloud._post_config_mistral') - def test_post_config(self, mock_post_config_mistral, mock_ensure_flavor, - mock_configure_ssh_keys, mock_get_auth_values, - mock_copy_stackrc, mock_delete, mock_mistral_client, - mock_swift_client, mock_nova_client, mock_ir_client, - mock_get_session, mock_member_role_exists, - mock_ensure_neutron_network, - mock_config_neutron_segments_and_subnets, - mock_resource_classes, mock_migrate_to_convergence, - mock_make_client): - instack_env = { - 'UNDERCLOUD_ENDPOINT_MISTRAL_PUBLIC': - 'http://192.168.24.1:8989/v2', - } - mock_get_auth_values.return_value = ('aturing', '3nigma', 'hut8', - 'http://bletchley:5000/') - mock_instance_nova = mock.Mock() - mock_nova_client.return_value = mock_instance_nova - mock_get_session.return_value = mock.MagicMock() - mock_instance_swift = mock.Mock() - mock_swift_client.return_value = mock_instance_swift - mock_instance_mistral = mock.Mock() - mock_mistral_client.return_value = mock_instance_mistral - mock_instance_ironic = mock_ir_client.return_value - flavors = [mock.Mock(spec=['name']), - mock.Mock(spec=['name'])] - # The mock library treats "name" attribute differently, and we cannot - # pass it through __init__ - flavors[0].name = 'baremetal' - flavors[1].name = 'ceph-storage' - mock_instance_nova.flavors.list.return_value = flavors - mock_heat = mock.Mock() - mock_make_client.return_value = mock_heat - - undercloud._post_config(instack_env, True) - mock_nova_client.assert_called_with( - 2, session=mock_get_session.return_value) - self.assertTrue(mock_copy_stackrc.called) - mock_configure_ssh_keys.assert_called_with(mock_instance_nova) - calls = [mock.call(mock_instance_nova, flavors[0], 'baremetal', None), - mock.call(mock_instance_nova, None, 'control', 'control'), - mock.call(mock_instance_nova, None, 'compute', 'compute'), - mock.call(mock_instance_nova, flavors[1], - 'ceph-storage', 'ceph-storage'), - mock.call(mock_instance_nova, None, - 'block-storage', 'block-storage'), - 
mock.call(mock_instance_nova, None, - 'swift-storage', 'swift-storage'), - ] - mock_ensure_flavor.assert_has_calls(calls) - mock_resource_classes.assert_called_once_with(mock_instance_ironic) - mock_post_config_mistral.assert_called_once_with( - instack_env, mock_instance_mistral, mock_instance_swift) - mock_migrate_to_convergence.assert_called_once_with(mock_heat) - - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('instack_undercloud.undercloud._get_session') - @mock.patch('mistralclient.api.client.client', autospec=True) - def test_run_validation_groups_success(self, mock_mistral_client, - mock_get_session, - mock_auth_values): - mock_mistral = mock.Mock() - mock_mistral_client.return_value = mock_mistral - mock_mistral.environments.list.return_value = [] - mock_mistral.executions.get.return_value = mock.Mock(state="SUCCESS") - mock_get_session.return_value = mock.MagicMock() - undercloud._run_validation_groups(["post-upgrade"]) - mock_mistral.executions.create.assert_called_once_with( - 'tripleo.validations.v1.run_groups', - workflow_input={ - 'group_names': ['post-upgrade'], - } - ) - - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('instack_undercloud.undercloud._get_session') - @mock.patch('mistralclient.api.client.client', autospec=True) - @mock.patch('time.strptime') - def test_run_validation_groups_fail(self, mock_strptime, - mock_mistral_client, mock_get_session, - mock_auth_values): - mock_mistral = mock.Mock() - mock_mistral_client.return_value = mock_mistral - mock_mistral.environments.list.return_value = [] - mock_mistral.executions.get.return_value = mock.Mock(state="FAIL") - mock_mistral.executions.get_output.return_value = "ERROR!" - mock_mistral.executions.get.id = "1234" - mock_mistral.action_executions.list.return_value = [] - mock_strptime.return_value = time.mktime(time.localtime()) - mock_get_session.return_value = mock.MagicMock() - self.assertRaises( - RuntimeError, undercloud._run_validation_groups, ["post-upgrade"], - "", 360, True) - - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('instack_undercloud.undercloud._get_session') - @mock.patch('mistralclient.api.client.client', autospec=True) - @mock.patch('time.strptime') - def test_run_validation_groups_timeout(self, mock_strptime, - mock_mistral_client, - mock_get_session, mock_auth_values): - mock_mistral = mock.Mock() - mock_mistral_client.return_value = mock_mistral - mock_mistral.environments.list.return_value = [] - mock_mistral.executions.get.id = "1234" - mock_mistral.action_executions.list.return_value = [] - mock_get_session.return_value = mock.MagicMock() - mock_time = mock.MagicMock() - mock_time.return_value = time.mktime(time.localtime()) - mock_strptime.return_value = time.mktime(time.localtime()) - with mock.patch('time.time', mock_time): - self.assertRaisesRegex(RuntimeError, ("TIMEOUT waiting for " - "execution"), - undercloud._run_validation_groups, - ["post-upgrade"], "", -1, True) - - def test_create_default_plan(self): - mock_mistral = mock.Mock() - mock_mistral.environments.list.return_value = [] - mock_mistral.executions.get.return_value = mock.Mock(state="SUCCESS") - - undercloud._create_default_plan(mock_mistral, []) - mock_mistral.executions.create.assert_called_once_with( - 'tripleo.plan_management.v1.create_deployment_plan', - workflow_input={ - 'container': 'overcloud', - 'use_default_templates': True, - } - ) - - def test_create_default_plan_existing(self): - mock_mistral = mock.Mock() - 
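- # a plan container named 'overcloud' already exists, so no creation - # workflow should be started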
undercloud._create_default_plan(mock_mistral, ['overcloud']) - mock_mistral.executions.create.assert_not_called() - - def test_create_config_environment(self): - mock_mistral = mock.Mock() - mock_mistral.environments.get.side_effect = ( - ks_exceptions.NotFound) - - env = { - "UNDERCLOUD_DB_PASSWORD": "root-db-pass", - "UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD": "snmpd-pass" - } - - json_string = { - "undercloud_db_password": "root-db-pass", - "undercloud_ceilometer_snmpd_password": "snmpd-pass" - } - - undercloud._create_mistral_config_environment(json.loads( - json.dumps(env, sort_keys=True)), mock_mistral) - - mock_mistral.environments.create.assert_called_once_with( - name='tripleo.undercloud-config', - description='Undercloud configuration parameters', - variables=json.dumps(json_string, sort_keys=True)) - - def test_create_config_environment_existing(self): - mock_mistral = mock.Mock() - environment = collections.namedtuple('environment', - ['name', 'variables']) - - json_string = { - "undercloud_db_password": "root-db-pass", - "undercloud_ceilometer_snmpd_password": "snmpd-pass" - } - - mock_mistral.environments.get.return_value = environment( - name='tripleo.undercloud-config', - variables=json.loads(json.dumps(json_string, sort_keys=True)) - ) - - env = { - "UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD": "snmpd-pass", - "UNDERCLOUD_DB_PASSWORD": "root-db-pass" - } - - undercloud._create_mistral_config_environment(json.loads( - json.dumps(env, sort_keys=True)), mock_mistral) - mock_mistral.executions.create.assert_not_called() - - def test_prepare_ssh_environment(self): - mock_mistral = mock.Mock() - undercloud._prepare_ssh_environment(mock_mistral) - mock_mistral.executions.create.assert_called_once_with( - 'tripleo.validations.v1.copy_ssh_key') - - @mock.patch('time.sleep') - def test_create_default_plan_timeout(self, mock_sleep): - mock_mistral = mock.Mock() - mock_mistral.executions.get.return_value = mock.Mock(state="RUNNING") - - self.assertRaises( - RuntimeError, - undercloud._create_default_plan, mock_mistral, [], timeout=0) - - @mock.patch('time.strptime') - def test_create_default_plan_failed(self, mock_strptime): - mock_mistral = mock.Mock() - mock_mistral.executions.get.return_value = mock.Mock(state="ERROR") - mock_mistral.action_executions.list.return_value = [] - mock_strptime.return_value = time.mktime(time.localtime()) - self.assertRaises( - RuntimeError, - undercloud._create_default_plan, mock_mistral, []) - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_copy_stackrc(self, mock_run): - undercloud._copy_stackrc() - calls = [mock.call(['sudo', 'cp', '/root/stackrc', mock.ANY], - name='Copy stackrc'), - mock.call(['sudo', 'chown', mock.ANY, mock.ANY], - name='Chown stackrc'), - ] - mock_run.assert_has_calls(calls) - - def _mock_ksclient_roles(self, mock_auth_values, mock_ksdiscover, roles): - mock_auth_values.return_value = ('user', 'password', - 'project', 'http://test:123') - mock_discover = mock.Mock() - mock_ksdiscover.return_value = mock_discover - mock_client = mock.Mock() - mock_roles = mock.Mock() - mock_role_list = [] - for role in roles: - mock_role = mock.Mock() - mock_role.name = role - mock_role_list.append(mock_role) - mock_roles.list.return_value = mock_role_list - mock_client.roles = mock_roles - mock_discover.create_client.return_value = mock_client - - mock_client.version = 'v3' - - mock_project_list = [mock.Mock(), mock.Mock()] - mock_project_list[0].name = 'admin' - mock_project_list[0].id = 'admin-id' - mock_project_list[1].name = 
'service' - mock_project_list[1].id = 'service-id' - mock_client.projects.list.return_value = mock_project_list - - mock_user_list = [mock.Mock(), mock.Mock()] - mock_user_list[0].name = 'admin' - mock_user_list[1].name = 'nova' - mock_client.users.list.return_value = mock_user_list - return mock_client - - @mock.patch('keystoneclient.discover.Discover') - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('os.path.isfile') - def test_member_role_exists(self, mock_isfile, mock_auth_values, - mock_ksdiscover): - mock_isfile.return_value = True - mock_client = self._mock_ksclient_roles(mock_auth_values, - mock_ksdiscover, - ['admin']) - undercloud._member_role_exists() - self.assertFalse(mock_client.projects.list.called) - - @mock.patch('keystoneclient.discover.Discover') - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('os.path.isfile') - def test_member_role_exists_true(self, mock_isfile, - mock_auth_values, mock_ksdiscover): - mock_isfile.return_value = True - mock_client = self._mock_ksclient_roles(mock_auth_values, - mock_ksdiscover, - ['admin', '_member_']) - undercloud._member_role_exists() - mock_user = mock_client.users.list.return_value[0] - mock_role = mock_client.roles.list.return_value[1] - mock_client.roles.grant.assert_called_once_with( - mock_role, user=mock_user, project='admin-id') - - @mock.patch('keystoneclient.discover.Discover') - @mock.patch('instack_undercloud.undercloud._get_auth_values') - @mock.patch('os.path.isfile') - def test_has_member_role(self, mock_isfile, mock_auth_values, - mock_ksdiscover): - mock_isfile.return_value = True - mock_client = self._mock_ksclient_roles(mock_auth_values, - mock_ksdiscover, - ['admin', '_member_']) - fake_exception = ks_exceptions.http.Conflict('test') - mock_client.roles.grant.side_effect = fake_exception - undercloud._member_role_exists() - mock_user = mock_client.users.list.return_value[0] - mock_role = mock_client.roles.list.return_value[1] - mock_client.roles.grant.assert_called_once_with( - mock_role, user=mock_user, project='admin-id') - - def _create_flavor_mocks(self): - mock_nova = mock.Mock() - mock_nova.flavors.create = mock.Mock() - mock_flavor = mock.Mock() - mock_nova.flavors.create.return_value = mock_flavor - mock_flavor.set_keys = mock.Mock() - return mock_nova, mock_flavor - - def test_ensure_flavor_no_profile(self): - mock_nova, mock_flavor = self._create_flavor_mocks() - undercloud._ensure_flavor(mock_nova, None, 'test') - mock_nova.flavors.create.assert_called_with('test', 4096, 1, 40) - keys = {'capabilities:boot_option': 'local', - 'resources:CUSTOM_BAREMETAL': '1', - 'resources:DISK_GB': '0', - 'resources:MEMORY_MB': '0', - 'resources:VCPU': '0'} - mock_flavor.set_keys.assert_called_with(keys) - - def test_ensure_flavor_profile(self): - mock_nova, mock_flavor = self._create_flavor_mocks() - undercloud._ensure_flavor(mock_nova, None, 'test', 'test') - mock_nova.flavors.create.assert_called_with('test', 4096, 1, 40) - keys = {'capabilities:boot_option': 'local', - 'capabilities:profile': 'test', - 'resources:CUSTOM_BAREMETAL': '1', - 'resources:DISK_GB': '0', - 'resources:MEMORY_MB': '0', - 'resources:VCPU': '0'} - mock_flavor.set_keys.assert_called_with(keys) - - def test_ensure_flavor_exists(self): - mock_nova, mock_flavor = self._create_flavor_mocks() - mock_nova.flavors.create.side_effect = exceptions.Conflict(None) - flavor = mock.Mock(spec=['name', 'get_keys', 'set_keys']) - flavor.get_keys.return_value = {'foo': 'bar'} - - 
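# An existing flavor is passed in and flavors.create is rigged to
-        # raise Conflict, so _ensure_flavor is expected to only merge the
-        # missing scheduling keys into the flavor via set_keys().
-        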
undercloud._ensure_flavor(mock_nova, flavor, 'test') - - keys = {'foo': 'bar', - 'resources:CUSTOM_BAREMETAL': '1', - 'resources:DISK_GB': '0', - 'resources:MEMORY_MB': '0', - 'resources:VCPU': '0'} - flavor.set_keys.assert_called_with(keys) - mock_nova.flavors.create.assert_not_called() - - @mock.patch.object(undercloud.LOG, 'warning', autospec=True) - def test_ensure_flavor_exists_conflicting_rc(self, mock_warn): - mock_nova, mock_flavor = self._create_flavor_mocks() - mock_nova.flavors.create.side_effect = exceptions.Conflict(None) - flavor = mock.Mock(spec=['name', 'get_keys', 'set_keys']) - flavor.get_keys.return_value = {'foo': 'bar', - 'resources:CUSTOM_FOO': '42'} - - undercloud._ensure_flavor(mock_nova, flavor, 'test') - - flavor.set_keys.assert_not_called() - mock_warn.assert_called_once_with(mock.ANY, flavor.name, - 'resources:CUSTOM_FOO') - mock_nova.flavors.create.assert_not_called() - - def test_ensure_node_resource_classes(self): - nodes = [mock.Mock(uuid='1', resource_class=None), - mock.Mock(uuid='2', resource_class='foobar')] - ironic_mock = mock.Mock() - ironic_mock.node.list.return_value = nodes - - undercloud._ensure_node_resource_classes(ironic_mock) - - ironic_mock.node.update.assert_called_once_with( - '1', [{'path': '/resource_class', 'op': 'add', - 'value': 'baremetal'}]) - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_migrate_to_convergence(self, mock_run_command): - stacks = [mock.Mock(id='1'), mock.Mock(id='2')] - mock_heat = mock.Mock() - mock_heat.stacks.list.return_value = stacks - undercloud._migrate_to_convergence(mock_heat) - self.assertEqual([mock.call(['sudo', '-E', 'heat-manage', - 'migrate_convergence_1', '1'], - name='heat-manage'), - mock.call(['sudo', '-E', 'heat-manage', - 'migrate_convergence_1', '2'], - name='heat-manage')], - mock_run_command.mock_calls) - - @mock.patch('instack_undercloud.undercloud._run_command') - def test_migrate_to_convergence_no_stacks(self, mock_run_command): - stacks = [] - mock_heat = mock.Mock() - mock_heat.stacks.list.return_value = stacks - undercloud._migrate_to_convergence(mock_heat) - mock_run_command.assert_not_called() - - @mock.patch('instack_undercloud.undercloud._extract_from_stackrc') - @mock.patch('instack_undercloud.undercloud._run_command') - def test_get_auth_values(self, mock_run, mock_extract): - mock_run.return_value = '3nigma' - mock_extract.side_effect = ['aturing', 'hut8', - 'http://bletchley:5000/v2.0'] - values = undercloud._get_auth_values() - expected = ('aturing', '3nigma', 'hut8', 'http://bletchley:5000/v2.0') - self.assertEqual(expected, values) - - def test_delete_default_flavors(self): - class FakeFlavor(object): - def __init__(self, id_, name): - self.id = id_ - self.name = name - mock_instance = mock.Mock() - mock_flavors = [FakeFlavor('f00', 'foo'), - FakeFlavor('8ar', 'm1.large')] - mock_instance.flavors.list.return_value = mock_flavors - undercloud._delete_default_flavors(mock_instance) - mock_instance.flavors.delete.assert_called_once_with('8ar') - - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('os.listdir') - @mock.patch('instack_undercloud.undercloud._create_mistral_config_' - 'environment') - @mock.patch('instack_undercloud.undercloud._create_default_plan') - @mock.patch('instack_undercloud.undercloud._upload_validations_to_swift') - def test_post_config_mistral(self, mock_upload, mock_create, mock_cmce, - mock_listdir, mock_isfile): - instack_env = {} - mock_mistral = mock.Mock() - mock_swift = mock.Mock() - 
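# swiftclient's get_account() returns a two-item (headers, container
-        # listing) result; the stub below mimics that shape so that the
-        # existing 'hut8' container ends up passed to _create_default_plan.
-        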
mock_swift.get_account.return_value = [None, [{'name': 'hut8'}]] - - mock_workbooks = [mock.Mock() for m in range(2)] - mock_workbooks[0].name = 'foo' - mock_workbooks[1].name = 'tripleo.bar' - mock_mistral.workbooks.list.return_value = mock_workbooks - mock_triggers = [mock.Mock() for m in range(2)] - mock_triggers[0].name = 'dont_delete_me' - mock_triggers[0].workflow_name = 'tripleo.foo' - mock_triggers[1].name = 'delete_me' - mock_triggers[1].workflow_name = 'tripleo.bar' - mock_mistral.cron_triggers.list.return_value = mock_triggers - mock_workflows = [mock.Mock() for m in range(2)] - mock_workflows[0].name = 'tripleo.foo' - mock_workflows[1].name = 'tripleo.bar' - mock_workflows[0].tags = [] - mock_workflows[1].tags = ['tripleo-common-managed', ] - mock_mistral.workflows.list.return_value = mock_workflows - - mock_listdir.return_value = ['foo.yaml', 'bar.yaml'] - undercloud._post_config_mistral(instack_env, mock_mistral, mock_swift) - self.assertEqual([mock.call('tripleo.bar')], - mock_mistral.workbooks.delete.mock_calls) - self.assertEqual([mock.call('tripleo.bar')], - mock_mistral.workflows.delete.mock_calls) - self.assertEqual([mock.call('delete_me')], - mock_mistral.cron_triggers.delete.mock_calls) - self.assertEqual([mock.call(undercloud.PATHS.WORKBOOK_PATH + - '/foo.yaml'), - mock.call(undercloud.PATHS.WORKBOOK_PATH + - '/bar.yaml')], - mock_mistral.workbooks.create.mock_calls) - mock_cmce.assert_called_once_with(instack_env, mock_mistral) - mock_create.assert_called_once_with(mock_mistral, ['hut8']) - mock_upload.assert_called_once_with(mock_mistral) - - def _neutron_mocks(self): - mock_sdk = mock.MagicMock() - mock_sdk.network.create_network = mock.Mock() - mock_sdk.network.create_segment = mock.Mock() - mock_sdk.network.update_segment = mock.Mock() - mock_sdk.network.delete_segment = mock.Mock() - mock_sdk.network.create_subnet = mock.Mock() - mock_sdk.network.update_subnet = mock.Mock() - return mock_sdk - - def test_network_create(self): - mock_sdk = self._neutron_mocks() - mock_sdk.network.networks.return_value = iter([]) - segment_mock = mock.Mock() - mock_sdk.network.segments.return_value = iter([segment_mock]) - undercloud._ensure_neutron_network(mock_sdk) - mock_sdk.network.create_network.assert_called_with( - name='ctlplane', provider_network_type='flat', - provider_physical_network='ctlplane', mtu=1500) - - def test_delete_default_segment(self): - mock_sdk = self._neutron_mocks() - mock_sdk.network.networks.return_value = iter([]) - segment_mock = mock.Mock() - mock_sdk.network.segments.return_value = iter([segment_mock]) - undercloud._ensure_neutron_network(mock_sdk) - mock_sdk.network.delete_segment.assert_called_with( - segment_mock.id) - - def test_network_exists(self): - mock_sdk = self._neutron_mocks() - mock_sdk.network.networks.return_value = iter(['ctlplane']) - undercloud._ensure_neutron_network(mock_sdk) - mock_sdk.network.create_network.assert_not_called() - - def test_segment_create(self): - mock_sdk = self._neutron_mocks() - undercloud._neutron_segment_create(mock_sdk, 'ctlplane-subnet', - 'network_id', 'ctlplane') - mock_sdk.network.create_segment.assert_called_with( - name='ctlplane-subnet', network_id='network_id', - physical_network='ctlplane', network_type='flat') - - def test_segment_update(self): - mock_sdk = self._neutron_mocks() - undercloud._neutron_segment_update(mock_sdk, - 'network_id', 'ctlplane-subnet') - mock_sdk.network.update_segment.assert_called_with( - 'network_id', name='ctlplane-subnet') - - def test_subnet_create(self): - 
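# The host route asserted below is what publishes the metadata IP
-        # (169.254.169.254) via the undercloud gateway; on a node it acts
-        # roughly like: ip route add 169.254.169.254/32 via 192.168.24.1
-        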
mock_sdk = self._neutron_mocks() - host_routes = [{'destination': '169.254.169.254/32', - 'nexthop': '192.168.24.1'}] - allocation_pool = [{'start': '192.168.24.5', 'end': '192.168.24.24'}] - undercloud._neutron_subnet_create(mock_sdk, 'network_id', - '192.168.24.0/24', '192.168.24.1', - host_routes, allocation_pool, - 'ctlplane-subnet', 'segment_id') - mock_sdk.network.create_subnet.assert_called_with( - name='ctlplane-subnet', cidr='192.168.24.0/24', - gateway_ip='192.168.24.1', host_routes=host_routes, enable_dhcp=True, - ip_version='4', allocation_pools=allocation_pool, - network_id='network_id', segment_id='segment_id') - - def test_subnet_update(self): - mock_sdk = self._neutron_mocks() - host_routes = [{'destination': '169.254.169.254/32', - 'nexthop': '192.168.24.1'}] - allocation_pool = [{'start': '192.168.24.5', 'end': '192.168.24.24'}] - undercloud._neutron_subnet_update(mock_sdk, 'subnet_id', - '192.168.24.1', host_routes, - allocation_pool, 'ctlplane-subnet') - mock_sdk.network.update_subnet.assert_called_with( - 'subnet_id', name='ctlplane-subnet', gateway_ip='192.168.24.1', - host_routes=host_routes, allocation_pools=allocation_pool) - - @mock.patch('instack_undercloud.undercloud._neutron_subnet_update') - @mock.patch('instack_undercloud.undercloud._get_subnet') - def test_no_neutron_segments_if_pre_segments_undercloud( - self, mock_get_subnet, mock_neutron_subnet_update): - mock_sdk = self._neutron_mocks() - mock_subnet = mock.Mock() - mock_subnet.segment_id = None - mock_get_subnet.return_value = mock_subnet - undercloud._config_neutron_segments_and_subnets(mock_sdk, - 'ctlplane_id') - mock_sdk.network.create_segment.assert_not_called() - mock_sdk.network.update_segment.assert_not_called() - mock_neutron_subnet_update.called_once() - - @mock.patch('instack_undercloud.undercloud._neutron_segment_create') - @mock.patch('instack_undercloud.undercloud._neutron_subnet_create') - @mock.patch('instack_undercloud.undercloud._get_segment') - @mock.patch('instack_undercloud.undercloud._get_subnet') - def test_segment_and_subnet_create(self, mock_get_subnet, mock_get_segment, - mock_neutron_subnet_create, - mock_neutron_segment_create): - mock_sdk = self._neutron_mocks() - mock_get_subnet.return_value = None - mock_get_segment.return_value = None - undercloud._config_neutron_segments_and_subnets(mock_sdk, - 'ctlplane_id') - mock_neutron_segment_create.assert_called_with( - mock_sdk, 'ctlplane-subnet', 'ctlplane_id', 'ctlplane') - host_routes = [{'destination': '169.254.169.254/32', - 'nexthop': '192.168.24.1'}] - allocation_pool = [{'start': '192.168.24.5', 'end': '192.168.24.24'}] - mock_neutron_subnet_create.assert_called_with( - mock_sdk, 'ctlplane_id', '192.168.24.0/24', '192.168.24.1', - host_routes, allocation_pool, 'ctlplane-subnet', - mock_neutron_segment_create().id) - - @mock.patch('instack_undercloud.undercloud._neutron_segment_update') - @mock.patch('instack_undercloud.undercloud._neutron_subnet_update') - @mock.patch('instack_undercloud.undercloud._get_segment') - @mock.patch('instack_undercloud.undercloud._get_subnet') - def test_segment_and_subnet_update(self, mock_get_subnet, mock_get_segment, - mock_neutron_subnet_update, - mock_neutron_segment_update): - mock_sdk = self._neutron_mocks() - mock_subnet = mock.Mock() - mock_subnet.id = 'subnet_id' - mock_subnet.segment_id = 'segment_id' - mock_get_subnet.return_value = mock_subnet - mock_segment = mock.Mock() - mock_get_segment.return_value = mock_segment - mock_segment.id = 'segment_id' - 
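# Both _get_subnet and _get_segment return existing objects here,
-        # so the helper should take the update path (update_segment and
-        # update_subnet) rather than creating new resources.
-        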
undercloud._config_neutron_segments_and_subnets(mock_sdk,
-                                                        'ctlplane_id')
-        mock_neutron_segment_update.assert_called_with(
-            mock_sdk, mock_subnet.segment_id, 'ctlplane-subnet')
-        host_routes = [{'destination': '169.254.169.254/32',
-                        'nexthop': '192.168.24.1'}]
-        allocation_pool = [{'start': '192.168.24.5', 'end': '192.168.24.24'}]
-        mock_neutron_subnet_update.assert_called_with(
-            mock_sdk, 'subnet_id', '192.168.24.1', host_routes,
-            allocation_pool, 'ctlplane-subnet')
-
-    @mock.patch('instack_undercloud.undercloud._get_segment')
-    @mock.patch('instack_undercloud.undercloud._get_subnet')
-    def test_local_subnet_cidr_conflict(self, mock_get_subnet,
-                                        mock_get_segment):
-        mock_sdk = self._neutron_mocks()
-        mock_subnet = mock.Mock()
-        mock_subnet.id = 'subnet_id'
-        mock_subnet.segment_id = 'existing_segment_id'
-        mock_get_subnet.return_value = mock_subnet
-        mock_segment = mock.Mock()
-        mock_get_segment.return_value = mock_segment
-        mock_segment.id = 'segment_id'
-        self.assertRaises(
-            RuntimeError,
-            undercloud._config_neutron_segments_and_subnets, [mock_sdk],
-            ['ctlplane_id'])
-
-
-class TestUpgradeFact(base.BaseTestCase):
-    @mock.patch('instack_undercloud.undercloud._run_command')
-    @mock.patch('os.path.dirname')
-    @mock.patch('os.path.exists')
-    @mock.patch.object(tempfile, 'mkstemp', return_value=(1, '/tmp/file'))
-    def test_upgrade_fact(self, mock_mkstemp, mock_exists, mock_dirname,
-                          mock_run):
-        fact_path = '/etc/facter/facts.d/undercloud_upgrade.txt'
-        mock_dirname.return_value = '/etc/facter/facts.d'
-        mock_exists.side_effect = [False, True]
-
-        with mock.patch('instack_undercloud.undercloud.open') as mock_open:
-            undercloud._handle_upgrade_fact(True)
-            mock_open.assert_called_with('/tmp/file', 'w')
-
-        run_calls = [
-            mock.call(['sudo', 'mkdir', '-p', '/etc/facter/facts.d']),
-            mock.call(['sudo', 'mv', '/tmp/file', fact_path]),
-            mock.call(['sudo', 'chmod', '0644', fact_path])
-        ]
-        mock_run.assert_has_calls(run_calls)
-        self.assertEqual(mock_run.call_count, 3)
-
-    @mock.patch('instack_undercloud.undercloud._run_command')
-    @mock.patch('os.path.dirname')
-    @mock.patch('os.path.exists')
-    @mock.patch.object(tempfile, 'mkstemp', return_value=(1, '/tmp/file'))
-    def test_upgrade_fact_install(self, mock_mkstemp, mock_exists,
-                                  mock_dirname, mock_run):
-        mock_dirname.return_value = '/etc/facter/facts.d'
-        mock_exists.return_value = False
-
-        with mock.patch('instack_undercloud.undercloud.open') as mock_open:
-            undercloud._handle_upgrade_fact(False)
-            mock_open.assert_not_called()
-
-        mock_run.assert_not_called()
-
-    @mock.patch('instack_undercloud.undercloud._run_command')
-    @mock.patch('os.path.dirname')
-    @mock.patch('os.path.exists')
-    @mock.patch.object(tempfile, 'mkstemp', return_value=(1, '/tmp/file'))
-    def test_upgrade_fact_upgrade_after_install(self, mock_mkstemp,
-                                                mock_exists, mock_dirname,
-                                                mock_run):
-        fact_path = '/etc/facter/facts.d/undercloud_upgrade.txt'
-        mock_dirname.return_value = '/etc/facter/facts.d'
-        mock_exists.return_value = True
-
-        with mock.patch('instack_undercloud.undercloud.open') as open_m:
-            undercloud._handle_upgrade_fact(True)
-            open_m.assert_called_with('/tmp/file', 'w')
-
-        run_calls = [
-            mock.call(['sudo', 'mv', '/tmp/file', fact_path]),
-            mock.call(['sudo', 'chmod', '0644', fact_path])
-        ]
-        mock_run.assert_has_calls(run_calls)
-        self.assertEqual(mock_run.call_count, 2)
-
-
-class TestInstackEnvironment(BaseTestCase):
-    def test_set_allowed_keys(self):
-        env = undercloud.InstackEnvironment()
-        
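# InstackEnvironment restricts item assignment to known option
-        # names; both assignments below are expected to succeed, while an
-        # unknown key (see the next test) must raise KeyError.
-        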
env['HOSTNAME'] = 'localhost1' - env['INSPECTION_COLLECTORS'] = 'a,b,c' - - def test_set_unknown_keys(self): - env = undercloud.InstackEnvironment() - - def _set(): - env['CATS_AND_DOGS_PATH'] = '/home' - - self.assertRaisesRegex(KeyError, 'CATS_AND_DOGS_PATH', _set) - - def test_get_always_allowed(self): - env = undercloud.InstackEnvironment() - env.get('HOSTNAME') - env.get('CATS_AND_DOGS_PATH') diff --git a/instack_undercloud/tests/test_validator.py b/instack_undercloud/tests/test_validator.py deleted file mode 100644 index 4702e3f74..000000000 --- a/instack_undercloud/tests/test_validator.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_config import fixture as config_fixture -from oslo_config import cfg -from oslotest import base - -from instack_undercloud import undercloud -from instack_undercloud import validator - - -class TestValidator(base.BaseTestCase): - def setUp(self): - super(TestValidator, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - # ctlplane-subnet - config group options - self.grp0 = cfg.OptGroup(name='ctlplane-subnet', - title='ctlplane-subnet') - self.opts = [cfg.StrOpt('cidr'), - cfg.StrOpt('dhcp_start'), - cfg.StrOpt('dhcp_end'), - cfg.StrOpt('inspection_iprange'), - cfg.StrOpt('gateway'), - cfg.BoolOpt('masquerade')] - self.conf.register_opts(self.opts, group=self.grp0) - self.grp1 = cfg.OptGroup(name='subnet1', title='subnet1') - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', dhcp_end='192.168.24.24', - inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', masquerade=True, - group='ctlplane-subnet') - - @mock.patch('netifaces.interfaces') - def test_validation_passes(self, ifaces_mock): - ifaces_mock.return_value = ['eth1'] - undercloud._validate_network() - - def test_fail_on_local_ip(self): - self.conf.config(local_ip='193.0.2.1/24') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_network_gateway(self): - self.conf.config(gateway='193.0.2.1', group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_dhcp_start(self): - self.conf.config(dhcp_start='193.0.2.10', group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_dhcp_end(self): - self.conf.config(dhcp_end='193.0.2.10', group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_inspection_start(self): - self.conf.config(inspection_iprange='193.0.2.100,192.168.24.120', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_inspection_end(self): - self.conf.config(inspection_iprange='192.168.24.100,193.0.2.120', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def 
test_fail_on_dhcp_order(self): - self.conf.config(dhcp_start='192.168.24.100', dhcp_end='192.168.24.10', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_dhcp_equal(self): - self.conf.config(dhcp_start='192.168.24.100', - dhcp_end='192.168.24.100', group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_inspection_order(self): - self.conf.config(inspection_iprange='192.168.24.120,192.168.24.100', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_inspection_equal(self): - self.conf.config(inspection_iprange='192.168.24.120,192.168.24.120', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_range_overlap_1(self): - self.conf.config(dhcp_start='192.168.24.10', dhcp_end='192.168.24.100', - inspection_iprange='192.168.24.90,192.168.24.110', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_range_overlap_2(self): - self.conf.config(dhcp_start='192.168.24.100', - dhcp_end='192.168.24.120', - inspection_iprange='192.168.24.90,192.168.24.110', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_range_overlap_3(self): - self.conf.config(dhcp_start='192.168.24.20', dhcp_end='192.168.24.90', - inspection_iprange='192.168.24.10,192.168.24.100', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_range_overlap_4(self): - self.conf.config(dhcp_start='192.168.24.10', dhcp_end='192.168.24.100', - inspection_iprange='192.168.24.20,192.168.24.90', - group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_invalid_local_ip(self): - self.conf.config(local_ip='192.168.24.1') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_unqualified_hostname(self): - self.conf.config(undercloud_hostname='undercloud') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_no_alter_params(self): - self.conf.config(cidr='192.168.24.0/24', group='ctlplane-subnet') - params = {opt.name: self.conf.conf[opt.name] - for opt in undercloud._opts} - params.update( - {opt.name: self.conf.conf.get('ctlplane-subnet')[opt.name] - for opt in undercloud._subnets_opts}) - save_params = dict(params) - validator.validate_config(params, lambda x: None) - self.assertEqual(save_params, params) - - @mock.patch('netifaces.interfaces') - def test_valid_undercloud_nameserver_passes(self, ifaces_mock): - ifaces_mock.return_value = ['eth1'] - self.conf.config(undercloud_nameservers=['192.168.24.4', - '192.168.24.5']) - undercloud._validate_network() - - def test_invalid_undercloud_nameserver_fails(self): - self.conf.config(undercloud_nameservers=['Iamthewalrus']) - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_invalid_public_host(self): - self.conf.config(undercloud_public_host='192.0.3.2', - undercloud_service_certificate='foo.pem', - enable_ui=False) - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - def test_fail_on_invalid_admin_host(self): - 
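# 192.0.3.3 lies outside the 192.168.24.0/24 ctlplane CIDR set up
-        # in setUp(), so generating a service certificate for it must
-        # fail validation.
-        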
self.conf.config(undercloud_admin_host='192.0.3.3', - generate_service_certificate=True, - enable_ui=False) - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - @mock.patch('netifaces.interfaces') - def test_ssl_hosts_allowed(self, ifaces_mock): - ifaces_mock.return_value = ['eth1'] - self.conf.config(undercloud_public_host='public.domain', - undercloud_admin_host='admin.domain', - undercloud_service_certificate='foo.pem', - enable_ui=False) - undercloud._validate_network() - - @mock.patch('netifaces.interfaces') - def test_allow_all_with_ui(self, ifaces_mock): - ifaces_mock.return_value = ['eth1'] - self.conf.config(undercloud_admin_host='10.0.0.10', - generate_service_certificate=True, - enable_ui=True) - undercloud._validate_network() - - @mock.patch('netifaces.interfaces') - def test_fail_on_invalid_ip(self, ifaces_mock): - ifaces_mock.return_value = ['eth1'] - self.conf.config(dhcp_start='foo.bar', group='ctlplane-subnet') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - @mock.patch('netifaces.interfaces') - def test_validate_interface_exists(self, ifaces_mock): - ifaces_mock.return_value = ['eth0', 'eth1'] - self.conf.config(local_interface='eth0') - undercloud._validate_network() - - @mock.patch('netifaces.interfaces') - def test_fail_validate_interface_missing(self, ifaces_mock): - ifaces_mock.return_value = ['eth0', 'eth1'] - self.conf.config(local_interface='em1') - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - @mock.patch('netifaces.interfaces') - def test_validate_interface_with_net_config_override(self, ifaces_mock): - ifaces_mock.return_value = ['eth0', 'eth1'] - self.conf.config(local_interface='em2', net_config_override='foo') - undercloud._validate_network() - - def test_validate_additional_architectures_ok(self): - self.conf.config(additional_architectures=['ppc64le'], - ipxe_enabled=False) - undercloud._validate_architecure_options() - - def test_validate_additional_architectures_bad_arch(self): - self.conf.config(additional_architectures=['ppc64le', 'INVALID'], - ipxe_enabled=False) - self.assertRaises(validator.FailedValidation, - undercloud._validate_architecure_options) - - def test_validate_additional_architectures_ipxe_fail(self): - self.conf.config(additional_architectures=['ppc64le'], - ipxe_enabled=True) - self.assertRaises(validator.FailedValidation, - undercloud._validate_architecure_options) - - @mock.patch('netifaces.interfaces') - def test_validate_routed_networks_not_enabled_pass(self, ifaces_mock): - ifaces_mock.return_value = ['eth0', 'eth1'] - self.conf.config(enable_routed_networks=False) - self.conf.config(subnets=['ctlplane-subnet']) - undercloud._validate_network() - - @mock.patch('netifaces.interfaces') - def test_validate_routed_networks_not_enabled_fail(self, ifaces_mock): - ifaces_mock.return_value = ['eth0', 'eth1'] - self.conf.config(enable_routed_networks=False) - self.conf.config(subnets=['ctlplane-subnet', 'subnet1']) - self.assertRaises(validator.FailedValidation, - undercloud._validate_network) - - @mock.patch('netifaces.interfaces') - def test_validate_routed_networks_enabled_pass(self, ifaces_mock): - ifaces_mock.return_value = ['eth0', 'eth1'] - self.conf.config(enable_routed_networks=True) - self.conf.config(subnets=['ctlplane-subnet', 'subnet1']) - self.conf.register_opts(self.opts, group=self.grp1) - self.conf.config(cidr='192.168.24.0/24', - dhcp_start='192.168.24.5', dhcp_end='192.168.24.24', - 
inspection_iprange='192.168.24.100,192.168.24.120', - gateway='192.168.24.1', masquerade=True, - group='ctlplane-subnet') - self.conf.config(cidr='192.168.10.0/24', dhcp_start='192.168.10.10', - dhcp_end='192.168.10.99', - inspection_iprange='192.168.10.100,192.168.10.189', - gateway='192.168.10.254', masquerade=True, - group='subnet1') - undercloud._validate_network() diff --git a/instack_undercloud/undercloud.py b/instack_undercloud/undercloud.py deleted file mode 100644 index 341921a2a..000000000 --- a/instack_undercloud/undercloud.py +++ /dev/null @@ -1,2405 +0,0 @@ -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import copy -import errno -import getpass -import glob -import hashlib -import json -import logging -import netaddr -import os -import platform -import re -import socket -import subprocess -import sys -import tempfile -import time -import uuid - -from ironicclient import client as ir_client -from keystoneauth1 import session -from keystoneauth1 import exceptions as ks_exceptions -from keystoneclient import discover -import keystoneauth1.identity.generic as ks_auth -from mistralclient.api import client as mistralclient -from mistralclient.api import base as mistralclient_exc -from novaclient import client as novaclient -from novaclient import exceptions -import os_client_config -from oslo_config import cfg -from oslo_utils import netutils -import psutil -import pystache -import six -from swiftclient import client as swiftclient - -from instack_undercloud import validator - - -# Making these values properties on a class allows us to delay their lookup, -# which makes testing code that interacts with these files much easier. -# NOTE(bnemec): The unit tests rely on these paths being in ~. If they are -# ever moved the tests may need to be updated to avoid overwriting real files. -class Paths(object): - @property - def CONF_PATH(self): - return os.path.expanduser('~/undercloud.conf') - - # NOTE(bnemec): Deprecated - @property - def ANSWERS_PATH(self): - return os.path.expanduser('~/instack.answers') - - @property - def PASSWORD_PATH(self): - return os.path.expanduser('~/undercloud-passwords.conf') - - @property - def LOG_FILE(self): - return os.path.expanduser('~/.instack/install-undercloud.log') - - @property - def WORKBOOK_PATH(self): - return '/usr/share/openstack-tripleo-common/workbooks' - - -PATHS = Paths() -DEFAULT_LOG_LEVEL = logging.DEBUG -DEFAULT_LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s' -DEFAULT_NODE_RESOURCE_CLASS = 'baremetal' -LOG = None -CONF = cfg.CONF -COMPLETION_MESSAGE = """ -############################################################################# -Undercloud %(undercloud_operation)s complete. - -The file containing this installation's passwords is at -%(password_path)s. - -There is also a stackrc file at %(stackrc_path)s. - -These files are needed to interact with the OpenStack services, and should be -secured. 
- -############################################################################# -""" -FAILURE_MESSAGE = """ -############################################################################# -Undercloud %(undercloud_operation)s failed. - -Reason: %(exception)s - -See the previous output for details about what went wrong. The full install -log can be found at %(log_file)s. - -############################################################################# -""" -# We need 8 GB, leave a little room for variation in what 8 GB means on -# different platforms. -REQUIRED_MB = 7680 -# Control plane network name -PHYSICAL_NETWORK = 'ctlplane' -SUBNETS_DEFAULT = ['ctlplane-subnet'] - -# Deprecated options -_deprecated_opt_network_gateway = [cfg.DeprecatedOpt( - 'network_gateway', group='DEFAULT')] -_deprecated_opt_network_cidr = [cfg.DeprecatedOpt( - 'network_cidr', group='DEFAULT')] -_deprecated_opt_dhcp_start = [cfg.DeprecatedOpt( - 'dhcp_start', group='DEFAULT')] -_deprecated_opt_dhcp_end = [cfg.DeprecatedOpt('dhcp_end', group='DEFAULT')] -_deprecated_opt_inspection_iprange = [cfg.DeprecatedOpt( - 'inspection_iprange', group='DEFAULT')] - -# When adding new options to the lists below, make sure to regenerate the -# sample config by running "tox -e genconfig" in the project root. -_opts = [ - cfg.StrOpt('undercloud_hostname', - help=('Fully qualified hostname (including domain) to set on ' - 'the Undercloud. If left unset, the ' - 'current hostname will be used, but the user is ' - 'responsible for configuring all system hostname ' - 'settings appropriately. If set, the undercloud install ' - 'will configure all system hostname settings.'), - ), - cfg.StrOpt('local_ip', - default='192.168.24.1/24', - help=('IP information for the interface on the Undercloud ' - 'that will be handling the PXE boots and DHCP for ' - 'Overcloud instances. The IP portion of the value will ' - 'be assigned to the network interface defined by ' - 'local_interface, with the netmask defined by the ' - 'prefix portion of the value.') - ), - cfg.StrOpt('undercloud_public_host', - deprecated_name='undercloud_public_vip', - default='192.168.24.2', - help=('Virtual IP or DNS address to use for the public ' - 'endpoints of Undercloud services. Only used with SSL.') - ), - cfg.StrOpt('undercloud_admin_host', - deprecated_name='undercloud_admin_vip', - default='192.168.24.3', - help=('Virtual IP or DNS address to use for the admin ' - 'endpoints of Undercloud services. Only used with SSL.') - ), - cfg.ListOpt('undercloud_nameservers', - default=[], - help=('DNS nameserver(s) to use for the undercloud node.'), - ), - cfg.ListOpt('undercloud_ntp_servers', - default=[], - help=('List of ntp servers to use.')), - cfg.StrOpt('overcloud_domain_name', - default='localdomain', - help=('DNS domain name to use when deploying the overcloud. ' - 'The overcloud parameter "CloudDomain" must be set to a ' - 'matching value.') - ), - cfg.ListOpt('subnets', - default=SUBNETS_DEFAULT, - help=('List of routed network subnets for provisioning ' - 'and introspection. Comma separated list of names/tags. ' - 'For each network a section/group needs to be added to ' - 'the configuration file with these parameters set: ' - 'cidr, dhcp_start, dhcp_end, inspection_iprange, ' - 'gateway and masquerade. Note: The section/group must ' - 'be placed before or after any other section. 
(See ' - 'the example section [ctlplane-subnet] in the sample ' - 'configuration file.)')), - cfg.StrOpt('local_subnet', - default=SUBNETS_DEFAULT[0], - help=('Name of the local subnet, where the PXE boot and DHCP ' - 'interfaces for overcloud instances is located. The IP ' - 'address of the local_ip/local_interface should reside ' - 'in this subnet.')), - cfg.StrOpt('undercloud_service_certificate', - default='', - help=('Certificate file to use for OpenStack service SSL ' - 'connections. Setting this enables SSL for the ' - 'OpenStack API endpoints, leaving it unset disables SSL.') - ), - cfg.BoolOpt('generate_service_certificate', - default=True, - help=('When set to True, an SSL certificate will be generated ' - 'as part of the undercloud install and this certificate ' - 'will be used in place of the value for ' - 'undercloud_service_certificate. The resulting ' - 'certificate will be written to ' - '/etc/pki/tls/certs/undercloud-[undercloud_public_host].' - 'pem. This certificate is signed by CA selected by the ' - '"certificate_generation_ca" option.') - ), - cfg.StrOpt('certificate_generation_ca', - default='local', - help=('The certmonger nickname of the CA from which the ' - 'certificate will be requested. This is used only if ' - 'the generate_service_certificate option is set. ' - 'Note that if the "local" CA is selected the ' - 'certmonger\'s local CA certificate will be extracted to ' - '/etc/pki/ca-trust/source/anchors/cm-local-ca.pem and ' - 'subsequently added to the trust chain.') - - ), - cfg.StrOpt('service_principal', - default='', - help=('The kerberos principal for the service that will use ' - 'the certificate. This is only needed if your CA ' - 'requires a kerberos principal. e.g. with FreeIPA.') - ), - cfg.StrOpt('local_interface', - default='eth1', - help=('Network interface on the Undercloud that will be ' - 'handling the PXE boots and DHCP for Overcloud ' - 'instances.') - ), - cfg.IntOpt('local_mtu', - default=1500, - help=('MTU to use for the local_interface.') - ), - cfg.StrOpt('masquerade_network', - default='192.168.24.0/24', - deprecated_for_removal=True, - deprecated_reason=('With support for routed networks, ' - 'masquerading of the provisioning networks ' - 'is moved to a boolean option for each ' - 'subnet.'), - help=('Network that will be masqueraded for external access, ' - 'if required. This should be the subnet used for PXE ' - 'booting.') - ), - cfg.StrOpt('hieradata_override', - default='', - help=('Path to hieradata override file. If set, the file will ' - 'be copied under /etc/puppet/hieradata and set as the ' - 'first file in the hiera hierarchy. This can be used ' - 'to custom configure services beyond what ' - 'undercloud.conf provides') - ), - cfg.StrOpt('net_config_override', - default='', - help=('Path to network config override template. If set, this ' - 'template will be used to configure the networking via ' - 'os-net-config. Must be in json format. ' - 'Templated tags can be used within the ' - 'template, see ' - 'instack-undercloud/elements/undercloud-stack-config/' - 'net-config.json.template for example tags') - ), - cfg.StrOpt('inspection_interface', - default='br-ctlplane', - deprecated_name='discovery_interface', - help=('Network interface on which inspection dnsmasq will ' - 'listen. If in doubt, use the default value.') - ), - cfg.BoolOpt('inspection_extras', - default=True, - help=('Whether to enable extra hardware collection during ' - 'the inspection process. 
Requires python-hardware or '
-                      'python-hardware-detect package on the introspection '
-                      'image.')),
-    cfg.BoolOpt('inspection_runbench',
-                default=False,
-                deprecated_name='discovery_runbench',
-                help=('Whether to run benchmarks when inspecting nodes. '
-                      'Requires inspection_extras set to True.')
-                ),
-    cfg.BoolOpt('enable_node_discovery',
-                default=False,
-                help=('Makes ironic-inspector enroll any unknown node that '
-                      'PXE-boots introspection ramdisk in Ironic. By default, '
-                      'the "ipmi" driver is used for new nodes (it is '
-                      'automatically enabled when this option is set to True).'
-                      ' Set discovery_default_driver to override. '
-                      'Introspection rules can also be used to specify driver '
-                      'information for newly enrolled nodes.')
-                ),
-    cfg.StrOpt('discovery_default_driver',
-               default='ipmi',
-               help=('The default hardware type to use for newly discovered '
-                     'nodes (requires enable_node_discovery set to True). '
-                     'It is automatically added to enabled_hardware_types.')
-               ),
-    cfg.BoolOpt('undercloud_debug',
-                default=True,
-                help=('Whether to enable the debug log level for Undercloud '
-                      'OpenStack services.')
-                ),
-    cfg.BoolOpt('undercloud_update_packages',
-                default=True,
-                help=('Whether to update packages during the Undercloud '
-                      'install.')
-                ),
-    cfg.BoolOpt('enable_tempest',
-                default=True,
-                help=('Whether to install Tempest in the Undercloud.')
-                ),
-    cfg.BoolOpt('enable_telemetry',
-                default=False,
-                help=('Whether to install Telemetry services '
-                      '(ceilometer, gnocchi, aodh, panko) in the Undercloud.')
-                ),
-    cfg.BoolOpt('enable_ui',
-                default=True,
-                help=('Whether to install the TripleO UI.')
-                ),
-    cfg.BoolOpt('enable_validations',
-                default=True,
-                help=('Whether to install requirements to run the TripleO '
-                      'validations.')
-                ),
-    cfg.BoolOpt('enable_cinder',
-                default=False,
-                help=('Whether to install the Volume service. It is not '
-                      'currently used in the undercloud.')),
-    cfg.BoolOpt('enable_novajoin',
-                default=False,
-                help=('Whether to install novajoin metadata service in '
-                      'the Undercloud.')
-                ),
-    cfg.BoolOpt('enable_container_images_build',
-                default=True,
-                help=('Whether to enable docker container images to be built '
-                      'on the undercloud.')
-                ),
-    cfg.ListOpt('docker_insecure_registries',
-                default=[],
-                help=('Array of host/port combinations of docker insecure '
-                      'registries.')
-                ),
-    cfg.StrOpt('ipa_otp',
-               default='',
-               help=('One Time Password to register Undercloud node with '
-                     'an IPA server. '
-                     'Required when enable_novajoin = True.')
-               ),
-    cfg.BoolOpt('ipxe_enabled',
-                default=True,
-                help=('Whether to use iPXE for deploy and inspection.'),
-                deprecated_name='ipxe_deploy',
-                ),
-    cfg.IntOpt('scheduler_max_attempts',
-               default=30, min=1,
-               help=('Maximum number of attempts the scheduler will make '
-                     'when deploying the instance. You should keep it '
-                     'greater than or equal to the number of bare metal '
-                     'nodes you expect to deploy at once to work around '
-                     'a potential race condition when scheduling.')),
-    cfg.BoolOpt('clean_nodes',
-                default=False,
-                help=('Whether to clean overcloud nodes (wipe the hard drive) '
-                      'between deployments and after the introspection.')),
-    cfg.ListOpt('enabled_hardware_types',
-                default=['ipmi', 'redfish', 'ilo', 'idrac'],
-                help=('List of enabled bare metal hardware types (next '
-                      'generation drivers).')),
-    cfg.StrOpt('docker_registry_mirror',
-               default='',
-               help=('An optional docker \'registry-mirror\' that will be '
-                     'configured in /etc/docker/daemon.json.')
-               ),
-    cfg.ListOpt('additional_architectures',
-                default=[],
-                help=('List of additional architectures enabled in your cloud '
-                      'environment. The list of supported values is: %s'
-                      % ' '.join(validator.SUPPORTED_ARCHITECTURES))),
-    cfg.BoolOpt('enable_routed_networks',
-                default=False,
-                help=('Enable support for routed ctlplane networks.')),
-]
-
-# Routed subnets
-_subnets_opts = [
-    cfg.StrOpt('cidr',
-               default='192.168.24.0/24',
-               deprecated_opts=_deprecated_opt_network_cidr,
-               help=('Network CIDR for the Neutron-managed subnet for '
-                     'Overcloud instances.')),
-    cfg.StrOpt('dhcp_start',
-               default='192.168.24.5',
-               deprecated_opts=_deprecated_opt_dhcp_start,
-               help=('Start of DHCP allocation range for PXE and DHCP of '
-                     'Overcloud instances on this network.')),
-    cfg.StrOpt('dhcp_end',
-               default='192.168.24.24',
-               deprecated_opts=_deprecated_opt_dhcp_end,
-               help=('End of DHCP allocation range for PXE and DHCP of '
-                     'Overcloud instances on this network.')),
-    cfg.StrOpt('inspection_iprange',
-               default='192.168.24.100,192.168.24.120',
-               deprecated_opts=_deprecated_opt_inspection_iprange,
-               help=('Temporary IP range that will be given to nodes on this '
-                     'network during the inspection process. Should not '
-                     'overlap with the range defined by dhcp_start and '
-                     'dhcp_end, but should be in the same ip subnet.')),
-    cfg.StrOpt('gateway',
-               default='192.168.24.1',
-               deprecated_opts=_deprecated_opt_network_gateway,
-               help=('Network gateway for the Neutron-managed network for '
-                     'Overcloud instances on this network.')),
-    cfg.BoolOpt('masquerade',
-                default=False,
-                help=('The network will be masqueraded for external access.')),
-]
-
-# Passwords, tokens, hashes
-_auth_opts = [
-    cfg.StrOpt('undercloud_db_password',
-               help=('Password used for MySQL root user. '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_admin_token',
-               help=('Keystone admin token. '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_admin_password',
-               help=('Keystone admin password. '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_glance_password',
-               help=('Glance service password. '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_heat_encryption_key',
-               help=('Heat db encryption key (must be 16, 24, or 32 '
-                     'characters). '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_heat_password',
-               help=('Heat service password. '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_heat_cfn_password',
-               help=('Heat cfn service password. '
-                     'If left unset, one will be automatically generated.')
-               ),
-    cfg.StrOpt('undercloud_neutron_password',
-               help=('Neutron service password. 
' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_nova_password', - help=('Nova service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_ironic_password', - help=('Ironic service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_aodh_password', - help=('Aodh service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_gnocchi_password', - help=('Gnocchi service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_ceilometer_password', - help=('Ceilometer service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_panko_password', - help=('Panko service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_ceilometer_metering_secret', - help=('Ceilometer metering secret. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_ceilometer_snmpd_user', - default='ro_snmp_user', - help=('Ceilometer snmpd read-only user. If this value is ' - 'changed from the default, the new value must be passed ' - 'in the overcloud environment as the parameter ' - 'SnmpdReadonlyUserName. This value must be between ' - '1 and 32 characters long.') - ), - cfg.StrOpt('undercloud_ceilometer_snmpd_password', - help=('Ceilometer snmpd password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_swift_password', - help=('Swift service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_mistral_password', - help=('Mistral service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_rabbit_cookie', - help=('Rabbitmq cookie. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_rabbit_password', - help=('Rabbitmq password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_rabbit_username', - help=('Rabbitmq username. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_heat_stack_domain_admin_password', - help=('Heat stack domain admin password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_swift_hash_suffix', - help=('Swift hash suffix. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_haproxy_stats_password', - help=('HAProxy stats password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_zaqar_password', - help=('Zaqar password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_horizon_secret_key', - help=('Horizon secret key. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_cinder_password', - help=('Cinder service password. ' - 'If left unset, one will be automatically generated.') - ), - cfg.StrOpt('undercloud_novajoin_password', - help=('Novajoin vendordata plugin service password. 
' - 'If left unset, one will be automatically generated.') - ), -] -CONF.register_opts(_opts) -CONF.register_opts(_auth_opts, group='auth') - - -def _load_subnets_config_groups(): - for group in CONF.subnets: - g = cfg.OptGroup(name=group, title=group) - CONF.register_opts(_subnets_opts, group=g) - - -def list_opts(): - return [(None, copy.deepcopy(_opts)), - (SUBNETS_DEFAULT[0], copy.deepcopy(_subnets_opts)), - ('auth', copy.deepcopy(_auth_opts)), - ] - - -def _configure_logging(level, filename): - """Does the initial logging configuration - - This should only ever be called once. If further changes to the logging - config are needed they should be made directly on the LOG object. - - :param level: The desired logging level - :param filename: The log file. Set to None to disable file logging. - """ - try: - os.makedirs(os.path.dirname(PATHS.LOG_FILE)) - except OSError as e: - if e.errno != errno.EEXIST: - raise - logging.basicConfig(filename=filename, - format=DEFAULT_LOG_FORMAT, - level=level) - global LOG - LOG = logging.getLogger(__name__) - if os.environ.get('OS_LOG_CAPTURE') != '1': - handler = logging.StreamHandler() - formatter = logging.Formatter(DEFAULT_LOG_FORMAT) - handler.setFormatter(formatter) - LOG.addHandler(handler) - - -def _load_config(): - conf_params = [] - if os.path.isfile(PATHS.PASSWORD_PATH): - conf_params += ['--config-file', PATHS.PASSWORD_PATH] - if os.path.isfile(PATHS.CONF_PATH): - conf_params += ['--config-file', PATHS.CONF_PATH] - else: - LOG.warning('%s does not exist. Using defaults.' % PATHS.CONF_PATH) - CONF(conf_params) - - -def _run_command(args, env=None, name=None): - """Run the command defined by args and return its output - - :param args: List of arguments for the command to be run. - :param env: Dict defining the environment variables. Pass None to use - the current environment. - :param name: User-friendly name for the command being run. A value of - None will cause args[0] to be used. - """ - if name is None: - name = args[0] - - if env is None: - env = os.environ - env = env.copy() - - # When running a localized python script, we need to tell it that we're - # using utf-8 for stdout, otherwise it can't tell because of the pipe. - env['PYTHONIOENCODING'] = 'utf8' - - try: - return subprocess.check_output(args, - stderr=subprocess.STDOUT, - env=env).decode('utf-8') - except subprocess.CalledProcessError as e: - LOG.error('%s failed: %s', name, e.output) - raise - - -def _run_live_command(args, env=None, name=None): - """Run the command defined by args and log its output - - Takes the same arguments as _run_command, but runs the process - asynchronously so the output can be logged while the process is still - running. - """ - if name is None: - name = args[0] - - if env is None: - env = os.environ - env = env.copy() - - # When running a localized python script, we need to tell it that we're - # using utf-8 for stdout, otherwise it can't tell because of the pipe. - env['PYTHONIOENCODING'] = 'utf8' - - process = subprocess.Popen(args, env=env, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - while True: - line = process.stdout.readline().decode('utf-8') - if line: - LOG.info(line.rstrip()) - if line == '' and process.poll() is not None: - break - if process.returncode != 0: - raise RuntimeError('%s failed. See log for details.' % name) - - -def _check_hostname(): - """Check system hostname configuration - - Rabbit and Puppet require pretty specific hostname configuration. 
This - function ensures that the system hostname settings are valid before - continuing with the installation. - """ - if CONF.undercloud_hostname is not None: - args = ['sudo', 'hostnamectl', 'set-hostname', - CONF.undercloud_hostname] - _run_command(args, name='hostnamectl') - - LOG.info('Checking for a FQDN hostname...') - args = ['sudo', 'hostnamectl', '--static'] - detected_static_hostname = _run_command(args, name='hostnamectl').rstrip() - LOG.info('Static hostname detected as %s', detected_static_hostname) - args = ['sudo', 'hostnamectl', '--transient'] - detected_transient_hostname = _run_command(args, - name='hostnamectl').rstrip() - LOG.info('Transient hostname detected as %s', detected_transient_hostname) - if detected_static_hostname != detected_transient_hostname: - LOG.error('Static hostname "%s" does not match transient hostname ' - '"%s".', detected_static_hostname, - detected_transient_hostname) - LOG.error('Use hostnamectl to set matching hostnames.') - raise RuntimeError('Static and transient hostnames do not match') - with open('/etc/hosts') as hosts_file: - for line in hosts_file: - if (not line.lstrip().startswith('#') and - detected_static_hostname in line.split()): - break - else: - short_hostname = detected_static_hostname.split('.')[0] - if short_hostname == detected_static_hostname: - raise RuntimeError('Configured hostname is not fully ' - 'qualified.') - sed_cmd = ('sed -i "s/127.0.0.1\(\s*\)/127.0.0.1\\1%s %s /" ' - '/etc/hosts' % - (detected_static_hostname, short_hostname)) - args = ['sudo', '/bin/bash', '-c', sed_cmd] - _run_command(args, name='hostname-to-etc-hosts') - LOG.info('Added hostname %s to /etc/hosts', - detected_static_hostname) - - -def _check_memory(): - """Check system memory - - The undercloud will not run properly in less than 8 GB of memory. - This function verifies that at least that much is available before - proceeding with install. - """ - mem = psutil.virtual_memory() - swap = psutil.swap_memory() - total_mb = (mem.total + swap.total) / 1024 / 1024 - if total_mb < REQUIRED_MB: - LOG.error('At least %d MB of memory is required for undercloud ' - 'installation. A minimum of 8 GB is recommended. ' - 'Only detected %d MB' % (REQUIRED_MB, total_mb)) - raise RuntimeError('Insufficient memory available') - - -def _check_ipv6_enabled(): - """Test if IPv6 is enabled - - If /proc/net/if_inet6 exist ipv6 sysctl settings are available. - """ - return os.path.isfile('/proc/net/if_inet6') - - -def _wrap_ipv6(ip): - """Wrap a IP address in square brackets if IPv6 - """ - if netutils.is_valid_ipv6(ip): - return "[%s]" % ip - return ip - - -def _check_sysctl(): - """Check sysctl option availability - - The undercloud will not install properly if some of the expected sysctl - values are not available to be set. - """ - options = ['net.ipv4.ip_forward', 'net.ipv4.ip_nonlocal_bind'] - if _check_ipv6_enabled(): - options.append('net.ipv6.ip_nonlocal_bind') - - not_available = [] - for option in options: - path = '/proc/sys/{opt}'.format(opt=option.replace('.', '/')) - if not os.path.isfile(path): - not_available.append(option) - - if not_available: - LOG.error('Required sysctl options are not available. Check ' - 'that your kernel is up to date. 
Missing: ' - '{options}'.format(options=", ".join(not_available))) - raise RuntimeError('Missing sysctl options') - - -def _cidr_overlaps(a, b): - return a.first <= b.last and b.first <= a.last - - -def _validate_network(): - def error_handler(message): - LOG.error('Undercloud configuration validation failed: %s', message) - raise validator.FailedValidation(message) - - if (len(CONF.subnets) > 1 and not CONF.enable_routed_networks): - message = ('Multiple subnets specified: %s but routed networks are ' - 'not enabled.' % CONF.subnets) - error_handler(message) - - params = {opt.name: CONF[opt.name] for opt in _opts} - # Get parameters of "local_subnet", pass to validator to ensure parameters - # such as "local_ip", "undercloud_public_host" and "undercloud_admin_host" - # are valid - local_subnet_opts = CONF.get(CONF.local_subnet) - params.update({opt.name: local_subnet_opts[opt.name] - for opt in _subnets_opts}) - validator.validate_config(params, error_handler) - - # Validate subnet parameters - subnet_cidrs = [] - for subnet in CONF.subnets: - subnet_opts = CONF.get(subnet) - params = {opt.name: subnet_opts[opt.name] for opt in _subnets_opts} - - if any(_cidr_overlaps(x, netaddr.IPNetwork(subnet_opts.cidr)) - for x in subnet_cidrs): - message = ('CIDR of %s, %s, overlaps with another subnet.' % - (subnet, subnet_opts.cidr)) - error_handler(message) - subnet_cidrs.append(netaddr.IPNetwork(subnet_opts.cidr)) - - validator.validate_subnet(subnet, params, error_handler) - - -def _validate_no_ip_change(): - """Disallow provisioning interface IP changes - - Changing the provisioning network IP causes a number of issues, so we - need to disallow it early in the install before configurations start to - be changed. - """ - if CONF.net_config_override: - os_net_config_file = CONF.net_config_override - else: - os_net_config_file = '/etc/os-net-config/config.json' - # Nothing to do if we haven't already installed - if not os.path.isfile( - os.path.expanduser(os_net_config_file)): - return - try: - with open(os_net_config_file) as f: - network_config = json.loads(f.read()) - ctlplane = [i for i in network_config.get('network_config', []) - if i['name'] == 'br-ctlplane'][0] - except ValueError: - # File was empty - return - except IndexError: - # Nothing to check if br-ctlplane wasn't configured - return - existing_ip = ctlplane['addresses'][0]['ip_netmask'] - if existing_ip != CONF.local_ip: - message = ('Changing the local_ip is not allowed. Existing IP: ' - '%s, Configured IP: %s') % (existing_ip, - CONF.local_ip) - LOG.error(message) - raise validator.FailedValidation(message) - - -def _validate_passwords_file(): - """Disallow updates if the passwords file is missing - - If the undercloud was already deployed, the passwords file needs to be - present so passwords that can't be changed are persisted. If the file - is missing it will break the undercloud, so we should fail-fast and let - the user know about the problem. - """ - if (os.path.isfile(os.path.expanduser('~/stackrc')) and - not os.path.isfile(PATHS.PASSWORD_PATH)): - message = ('The %s file is missing. This will cause all service ' - 'passwords to change and break the existing undercloud. 
' % - PATHS.PASSWORD_PATH) - raise validator.FailedValidation(message) - - -def _validate_architecure_options(): - def error_handler(message): - LOG.error('Undercloud configuration validation failed: %s', message) - raise validator.FailedValidation(message) - - params = {opt.name: CONF[opt.name] for opt in _opts} - validator._validate_additional_architectures(params, error_handler) - validator._validate_ppc64le_exclusive_opts(params, error_handler) - - -def _validate_configuration(): - try: - _check_hostname() - _check_memory() - _check_sysctl() - _validate_network() - _validate_no_ip_change() - _validate_passwords_file() - _validate_architecure_options() - except RuntimeError as e: - LOG.error('An error occurred during configuration validation, ' - 'please check your host configuration and try again. ' - 'Error message: {error}'.format(error=e)) - sys.exit(1) - - -def _generate_password(length=40): - """Create a random password - - Copied from rdomanager-oscplugin. This should eventually live in - tripleo-common. - """ - uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8") - return hashlib.sha1(uuid_str).hexdigest()[:length] - - -def _get_service_endpoints(name, format_str, public, internal, admin=None, - public_proto='http', internal_proto='http'): - endpoints = {} - upper_name = name.upper().replace('-', '_') - public_port_key = 'port' - - if not admin: - admin = internal - if public_proto in ['https', 'wss']: - public_port_key = 'ssl_port' - - endpoints['UNDERCLOUD_ENDPOINT_%s_PUBLIC' % upper_name] = ( - format_str % (public_proto, _wrap_ipv6(public['host']), - public[public_port_key])) - endpoints['UNDERCLOUD_ENDPOINT_%s_INTERNAL' % upper_name] = ( - format_str % (internal_proto, _wrap_ipv6(internal['host']), - internal['port'])) - endpoints['UNDERCLOUD_ENDPOINT_%s_ADMIN' % upper_name] = ( - format_str % (internal_proto, _wrap_ipv6(admin['host']), - admin['port'])) - return endpoints - - -def _generate_endpoints(instack_env): - local_host = instack_env['LOCAL_IP'] - public_host = local_host - public_proto = 'http' - internal_host = local_host - internal_proto = 'http' - zaqar_ws_public_proto = 'ws' - zaqar_ws_internal_proto = 'ws' - - if (CONF.undercloud_service_certificate or - CONF.generate_service_certificate): - public_host = CONF.undercloud_public_host - internal_host = CONF.undercloud_admin_host - public_proto = 'https' - zaqar_ws_public_proto = 'wss' - - endpoints = {} - - endpoint_list = [ - ('heat', - '%s://%s:%d/v1/%%(tenant_id)s', - {'host': public_host, 'port': 8004, 'ssl_port': 13004}, - {'host': internal_host, 'port': 8004}), - ('heat-cfn', - '%s://%s:%d/v1/%%(tenant_id)s', - {'host': public_host, 'port': 8000, 'ssl_port': 13800}, - {'host': internal_host, 'port': 8000}), - ('heat-ui-proxy', - '%s://%s:%d', - {'host': public_host, 'port': 8004, 'ssl_port': 13004}, - {'host': internal_host, 'port': 8004}), - ('heat-ui-config', - '%s://%s:%d/heat/v1/%%(project_id)s', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}), - ('neutron', - '%s://%s:%d', - {'host': public_host, 'port': 9696, 'ssl_port': 13696}, - {'host': internal_host, 'port': 9696}), - ('glance', - '%s://%s:%d', - {'host': public_host, 'port': 9292, 'ssl_port': 13292}, - {'host': internal_host, 'port': 9292}), - ('nova', - '%s://%s:%d/v2.1', - {'host': public_host, 'port': 8774, 'ssl_port': 13774}, - {'host': internal_host, 'port': 8774}), - ('nova-ui-proxy', - '%s://%s:%d', - {'host': public_host, 'port': 8774, 'ssl_port': 13774}, - {'host': internal_host, 
'port': 8774}), - ('nova-ui-config', - '%s://%s:%d/nova/v2.1', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}), - ('placement', - '%s://%s:%d/placement', - {'host': public_host, 'port': 8778, 'ssl_port': 13778}, - {'host': internal_host, 'port': 8778}), - ('keystone', - '%s://%s:%d', - {'host': public_host, 'port': 5000, 'ssl_port': 13000}, - {'host': internal_host, 'port': 5000}, - {'host': internal_host, 'port': 35357}), - ('keystone-ui-config', - '%s://%s:%d/keystone/v3', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}, - {'host': internal_host, 'port': 35357}), - ('swift', - '%s://%s:%d/v1/AUTH_%%(tenant_id)s', - {'host': public_host, 'port': 8080, 'ssl_port': 13808}, - {'host': internal_host, 'port': 8080}), - ('swift-ui-proxy', - '%s://%s:%d', - {'host': public_host, 'port': 8080, 'ssl_port': 13808}, - {'host': internal_host, 'port': 8080}), - ('swift-ui-config', - '%s://%s:%d/swift/v1/AUTH_%%(project_id)s', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}), - ('ironic', - '%s://%s:%d', - {'host': public_host, 'port': 6385, 'ssl_port': 13385}, - {'host': internal_host, 'port': 6385}), - ('ironic-ui-config', - '%s://%s:%d/ironic', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}), - ('ironic_inspector', - '%s://%s:%d', - {'host': public_host, 'port': 5050, 'ssl_port': 13050}, - {'host': internal_host, 'port': 5050}), - ('ironic_inspector-ui-config', - '%s://%s:%d/ironic-inspector', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}), - ('aodh', - '%s://%s:%d', - {'host': public_host, 'port': 8042, 'ssl_port': 13042}, - {'host': internal_host, 'port': 8042}), - ('gnocchi', - '%s://%s:%d', - {'host': public_host, 'port': 8041, 'ssl_port': 13041}, - {'host': internal_host, 'port': 8041}), - ('panko', - '%s://%s:%d', - {'host': public_host, 'port': 8977, 'ssl_port': 13977}, - {'host': internal_host, 'port': 8977}), - ('mistral', - '%s://%s:%d/v2', - {'host': public_host, 'port': 8989, 'ssl_port': 13989}, - {'host': internal_host, 'port': 8989}), - ('mistral-ui-proxy', - '%s://%s:%d', - {'host': public_host, 'port': 8989, 'ssl_port': 13989}, - {'host': internal_host, 'port': 8989}), - ('mistral-ui-config', - '%s://%s:%d/mistral/v2', - {'host': public_host, 'port': 3000, 'ssl_port': 443}, - {'host': internal_host, 'port': 3000}), - ('zaqar', - '%s://%s:%d', - {'host': public_host, 'port': 8888, 'ssl_port': 13888}, - {'host': internal_host, 'port': 8888}), - ('cinder', - '%s://%s:%d/v1/%%(tenant_id)s', - {'host': public_host, 'port': 8776, 'ssl_port': 13776}, - {'host': internal_host, 'port': 8776}), - ('cinder_v2', - '%s://%s:%d/v2/%%(tenant_id)s', - {'host': public_host, 'port': 8776, 'ssl_port': 13776}, - {'host': internal_host, 'port': 8776}), - ('cinder_v3', - '%s://%s:%d/v3/%%(tenant_id)s', - {'host': public_host, 'port': 8776, 'ssl_port': 13776}, - {'host': internal_host, 'port': 8776}), - ] - for endpoint_data in endpoint_list: - endpoints.update( - _get_service_endpoints(*endpoint_data, - public_proto=public_proto, - internal_proto=internal_proto)) - - # Zaqar's websocket endpoint - # NOTE(jaosorior): Zaqar's websocket endpoint doesn't support being proxied - # on a different port. If that's done it will ignore the handshake and - # won't work. 
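- # For that reason the 'zaqar-websocket' entry below keeps ssl_port equal
- # to the plain port (9000) rather than a 13xxx TLS-proxy port, so the
- # public endpoint stays on port 9000 even when TLS is enabled.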
- endpoints.update(_get_service_endpoints( - 'zaqar-websocket', - '%s://%s:%d', - {'host': public_host, 'port': 9000, 'ssl_port': 9000}, - {'host': internal_host, 'port': 9000}, - public_proto=zaqar_ws_public_proto, - internal_proto=zaqar_ws_internal_proto)) - - endpoints.update(_get_service_endpoints( - 'zaqar-ui-proxy', - '%s://%s:%d', - {'host': public_host, 'port': 9000, 'ssl_port': 443, - 'zaqar_ws_public_proto': 'ws'}, - {'host': internal_host, 'port': 9000}, - public_proto=zaqar_ws_public_proto, - internal_proto=zaqar_ws_internal_proto)) - - endpoints.update(_get_service_endpoints( - 'zaqar-ui-config', - '%s://%s:%d/zaqar', - {'host': public_host, 'port': 3000, 'ssl_port': 443, - 'zaqar_ws_public_proto': 'wss'}, - {'host': internal_host, 'port': 3000}, - public_proto=zaqar_ws_public_proto, - internal_proto=zaqar_ws_internal_proto)) - - # The swift admin endpoint has a different format from the others - endpoints['UNDERCLOUD_ENDPOINT_SWIFT_ADMIN'] = ( - '%s://%s:%s' % (internal_proto, internal_host, 8080)) - instack_env.update(endpoints) - - -def _write_password_file(instack_env): - with open(PATHS.PASSWORD_PATH, 'w') as password_file: - password_file.write('[auth]\n') - for opt in _auth_opts: - env_name = opt.name.upper() - value = CONF.auth[opt.name] - if not value: - # Heat requires this encryption key to be a specific length - if env_name == 'UNDERCLOUD_HEAT_ENCRYPTION_KEY': - value = _generate_password(32) - else: - value = _generate_password() - LOG.info('Generated new password for %s', opt.name) - instack_env[env_name] = value - password_file.write('%s=%s\n' % (opt.name, value)) - os.chmod(PATHS.PASSWORD_PATH, 0o600) - - -def _member_role_exists(): - # This is a workaround for puppet removing the deprecated _member_ - # role on upgrade - if it exists we must restore role assignments, - # or trusts stored in the undercloud heat will break - user, password, project, auth_url = _get_auth_values() - auth_kwargs = { - 'auth_url': auth_url, - 'username': user, - 'password': password, - 'project_name': project, - 'project_domain_name': 'Default', - 'user_domain_name': 'Default', - } - auth_plugin = ks_auth.Password(**auth_kwargs) - sess = session.Session(auth=auth_plugin) - disc = discover.Discover(session=sess) - c = disc.create_client() - try: - member_role = [r for r in c.roles.list() if r.name == '_member_'][0] - except IndexError: - # Do nothing if there is no _member_ role - return - if c.version == 'v2.0': - client_projects = c.tenants - else: - client_projects = c.projects - admin_project = [t for t in client_projects.list() if t.name == 'admin'][0] - admin_user = [u for u in c.users.list() if u.name == 'admin'][0] - if c.version == 'v2.0': - try: - c.roles.add_user_role(admin_user, member_role, admin_project.id) - LOG.info('Added _member_ role to admin user') - except ks_exceptions.http.Conflict: - # They already had the role - pass - else: - try: - c.roles.grant(member_role, - user=admin_user, - project=admin_project.id) - LOG.info('Added _member_ role to admin user') - except ks_exceptions.http.Conflict: - # They already had the role - pass - - -class InstackEnvironment(dict): - """An environment to pass to Puppet with some safety checks. - - Keeps lists of variables we add to the operating system environment, - and ensures that we don't add anything not defined there.
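- 
-     For example (illustrative values):
- 
-         env = InstackEnvironment()
-         env['LOCAL_IP_WRAPPED'] = '[fd12::1]'  # allowed, a DYNAMIC_KEYS member
-         env['SOME_RANDOM_KEY'] = 'x'           # raises KeyError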
- """ - - INSTACK_KEYS = {'HOSTNAME', 'ELEMENTS_PATH', 'NODE_DIST', 'JSONFILE', - 'REG_METHOD', 'REG_HALT_UNREGISTER', 'PUBLIC_INTERFACE_IP'} - """The variables instack and/or used elements can read.""" - - DYNAMIC_KEYS = {'INSPECTION_COLLECTORS', 'INSPECTION_KERNEL_ARGS', - 'INSPECTION_NODE_NOT_FOUND_HOOK', - 'TRIPLEO_INSTALL_USER', 'TRIPLEO_UNDERCLOUD_CONF_FILE', - 'TRIPLEO_UNDERCLOUD_PASSWORD_FILE', - 'ENABLED_BOOT_INTERFACES', 'ENABLED_POWER_INTERFACES', - 'ENABLED_RAID_INTERFACES', 'ENABLED_VENDOR_INTERFACES', - 'ENABLED_MANAGEMENT_INTERFACES', 'SYSCTL_SETTINGS', - 'LOCAL_IP_WRAPPED', 'ENABLE_ARCHITECTURE_PPC64LE', - 'INSPECTION_SUBNETS', 'SUBNETS_CIDR_NAT_RULES', - 'SUBNETS_STATIC_ROUTES', 'MASQUERADE_NETWORKS'} - """The variables we calculate in _generate_environment call.""" - - PUPPET_KEYS = DYNAMIC_KEYS | {opt.name.upper() for _, group in list_opts() - for opt in group} - """Keys we pass for formatting the resulting hieradata.""" - - SET_ALLOWED_KEYS = DYNAMIC_KEYS | INSTACK_KEYS | PUPPET_KEYS - """Keys which we allow to add/change in this environment.""" - - def __init__(self): - super(InstackEnvironment, self).__init__(os.environ) - - def __setitem__(self, key, value): - if key not in self.SET_ALLOWED_KEYS: - raise KeyError('Key %s is not allowed for an InstackEnvironment' % - key) - return super(InstackEnvironment, self).__setitem__(key, value) - - -def _make_list(values): - """Generate a list suitable to pass to templates.""" - return '[%s]' % ', '.join('"%s"' % item for item in values) - - -def _generate_sysctl_settings(): - sysctl_settings = {} - sysctl_settings.update({"net.ipv4.ip_nonlocal_bind": {"value": 1}}) - if _check_ipv6_enabled(): - sysctl_settings.update({"net.ipv6.ip_nonlocal_bind": {"value": 1}}) - return json.dumps(sysctl_settings) - - -def _process_drivers_and_hardware_types(instack_env): - """Populate the environment with ironic driver information.""" - # Ensure correct rendering of the list and uniqueness of the items - enabled_hardware_types = set(CONF.enabled_hardware_types) - if CONF.enable_node_discovery: - if CONF.discovery_default_driver not in enabled_hardware_types: - enabled_hardware_types.add(CONF.discovery_default_driver) - instack_env['INSPECTION_NODE_NOT_FOUND_HOOK'] = 'enroll' - else: - instack_env['INSPECTION_NODE_NOT_FOUND_HOOK'] = '' - - # In most cases power and management interfaces are called the same, so we - # use one variable for them. - mgmt_interfaces = {'fake', 'ipmitool'} - # TODO(dtantsur): can we somehow avoid hardcoding hardware types here? - for hw_type in ('redfish', 'idrac', 'ilo', 'irmc', 'staging-ovirt'): - if hw_type in enabled_hardware_types: - mgmt_interfaces.add(hw_type) - for (hw_type, iface) in [('cisco-ucs-managed', 'ucsm'), - ('cisco-ucs-standalone', 'cimc')]: - if hw_type in enabled_hardware_types: - mgmt_interfaces.add(iface) - - # Two hardware types use non-default boot interfaces. 
- boot_interfaces = {'pxe'} - for hw_type in ('ilo', 'irmc'): - if hw_type in enabled_hardware_types: - boot_interfaces.add('%s-pxe' % hw_type) - - raid_interfaces = {'no-raid'} - if 'idrac' in enabled_hardware_types: - raid_interfaces.add('idrac') - - vendor_interfaces = {'no-vendor'} - for (hw_type, iface) in [('ipmi', 'ipmitool'), - ('idrac', 'idrac')]: - if hw_type in enabled_hardware_types: - vendor_interfaces.add(iface) - - instack_env['ENABLED_HARDWARE_TYPES'] = _make_list(enabled_hardware_types) - - instack_env['ENABLED_BOOT_INTERFACES'] = _make_list(boot_interfaces) - instack_env['ENABLED_MANAGEMENT_INTERFACES'] = _make_list(mgmt_interfaces) - instack_env['ENABLED_RAID_INTERFACES'] = _make_list(raid_interfaces) - instack_env['ENABLED_VENDOR_INTERFACES'] = _make_list(vendor_interfaces) - - # The snmp hardware type uses fake management and snmp power - if 'snmp' in enabled_hardware_types: - mgmt_interfaces.add('snmp') - instack_env['ENABLED_POWER_INTERFACES'] = _make_list(mgmt_interfaces) - - -def _generate_masquerade_networks(): - env_list = [] - for subnet in CONF.subnets: - s = CONF.get(subnet) - if s.masquerade: - env_list.append(s.cidr) - - # NOTE(hjensas): Remove once deprecated masquerade_network option is gone - if CONF.masquerade_network and (CONF.masquerade_network not in env_list): - env_list.append(CONF.masquerade_network) - - return json.dumps(env_list) - - -def _generate_inspection_subnets(): - env_list = [] - for subnet in CONF.subnets: - env_dict = {} - s = CONF.get(subnet) - env_dict['tag'] = subnet - env_dict['ip_range'] = s.inspection_iprange - env_dict['netmask'] = str(netaddr.IPNetwork(s.cidr).netmask) - env_dict['gateway'] = s.gateway - env_list.append(env_dict) - return json.dumps(env_list) - - -def _generate_subnets_static_routes(): - env_list = [] - local_router = CONF.get(CONF.local_subnet).gateway - for subnet in CONF.subnets: - if subnet == str(CONF.local_subnet): - continue - s = CONF.get(subnet) - env_list.append({'ip_netmask': s.cidr, - 'next_hop': local_router}) - return json.dumps(env_list) - - -def _generate_subnets_cidr_nat_rules(): - env_list = [] - for subnet in CONF.subnets: - s = CONF.get(subnet) - data_format = '"140 {direction} {name} cidr nat": ' \ - '{{"chain": "FORWARD", "{direction}": "{cidr}", ' \ - '"proto": "all", "action": "accept"}}' - env_list.append(data_format.format( - name=subnet, direction='destination', cidr=s.cidr)) - env_list.append(data_format.format( - name=subnet, direction='source', cidr=s.cidr)) - # Whitespace after newline required for indentation in templated yaml - return '\n '.join(env_list) - - -def _generate_environment(instack_root): - """Generate an environment dict for instack - - The returned dict will have the necessary values for use as the env - parameter when calling instack via the subprocess module. - - :param instack_root: The path containing the instack-undercloud elements - and json files. 
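- 
-     A rough usage sketch (the path is illustrative):
- 
-         env = _generate_environment('/usr/share/instack-undercloud')
-         _generate_init_data(env)
-         _run_instack(env)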
- """ - instack_env = InstackEnvironment() - # Rabbit uses HOSTNAME, so we need to make sure it's right - instack_env['HOSTNAME'] = CONF.undercloud_hostname or socket.gethostname() - - # Find the paths we need - json_file_dir = '/usr/share/instack-undercloud/json-files' - if not os.path.isdir(json_file_dir): - json_file_dir = os.path.join(instack_root, 'json-files') - instack_undercloud_elements = '/usr/share/instack-undercloud' - if not os.path.isdir(instack_undercloud_elements): - instack_undercloud_elements = os.path.join(instack_root, 'elements') - tripleo_puppet_elements = '/usr/share/tripleo-puppet-elements' - if not os.path.isdir(tripleo_puppet_elements): - tripleo_puppet_elements = os.path.join(os.getcwd(), - 'tripleo-puppet-elements', - 'elements') - if 'ELEMENTS_PATH' in os.environ: - instack_env['ELEMENTS_PATH'] = os.environ['ELEMENTS_PATH'] - else: - instack_env['ELEMENTS_PATH'] = ( - '%s:%s:' - '/usr/share/tripleo-image-elements:' - '/usr/share/diskimage-builder/elements' - ) % (tripleo_puppet_elements, instack_undercloud_elements) - - # Distro-specific values - distro = platform.linux_distribution()[0] - if distro.startswith('Red Hat Enterprise Linux'): - instack_env['NODE_DIST'] = os.environ.get('NODE_DIST') or 'rhel7' - instack_env['JSONFILE'] = ( - os.environ.get('JSONFILE') or - os.path.join(json_file_dir, 'rhel-7-undercloud-packages.json') - ) - instack_env['REG_METHOD'] = 'disable' - instack_env['REG_HALT_UNREGISTER'] = '1' - elif distro.startswith('CentOS'): - instack_env['NODE_DIST'] = os.environ.get('NODE_DIST') or 'centos7' - instack_env['JSONFILE'] = ( - os.environ.get('JSONFILE') or - os.path.join(json_file_dir, 'centos-7-undercloud-packages.json') - ) - elif distro.startswith('Fedora'): - instack_env['NODE_DIST'] = os.environ.get('NODE_DIST') or 'fedora' - raise RuntimeError('Fedora is not currently supported') - else: - raise RuntimeError('%s is not supported' % distro) - - if CONF['additional_architectures']: - for arch in CONF['additional_architectures']: - env_name = ('enable_architecture_%s' % arch).upper() - instack_env[env_name] = six.text_type(True) - - # Convert conf opts to env values - for opt in _opts: - env_name = opt.name.upper() - instack_env[env_name] = six.text_type(CONF[opt.name]) - - # Opts that needs extra processing - if CONF.inspection_runbench and not CONF.inspection_extras: - raise RuntimeError('inspection_extras must be enabled for ' - 'inspection_runbench to work') - if CONF.inspection_extras: - instack_env['INSPECTION_COLLECTORS'] = ('default,extra-hardware,' - 'numa-topology,logs') - else: - instack_env['INSPECTION_COLLECTORS'] = 'default,logs' - - inspection_kernel_args = [] - if CONF.undercloud_debug: - inspection_kernel_args.append('ipa-debug=1') - if CONF.inspection_runbench: - inspection_kernel_args.append('ipa-inspection-benchmarks=cpu,mem,disk') - if CONF.inspection_extras: - inspection_kernel_args.append('ipa-inspection-dhcp-all-interfaces=1') - inspection_kernel_args.append('ipa-collect-lldp=1') - - instack_env['INSPECTION_KERNEL_ARGS'] = ' '.join(inspection_kernel_args) - - _process_drivers_and_hardware_types(instack_env) - instack_env['INSPECTION_SUBNETS'] = _generate_inspection_subnets() - instack_env['SUBNETS_CIDR_NAT_RULES'] = _generate_subnets_cidr_nat_rules() - instack_env['MASQUERADE_NETWORKS'] = _generate_masquerade_networks() - instack_env['SUBNETS_STATIC_ROUTES'] = _generate_subnets_static_routes() - - instack_env['SYSCTL_SETTINGS'] = _generate_sysctl_settings() - - instack_env['PUBLIC_INTERFACE_IP'] = 
instack_env['LOCAL_IP'] - instack_env['LOCAL_IP'] = instack_env['LOCAL_IP'].split('/')[0] - instack_env['LOCAL_IP_WRAPPED'] = _wrap_ipv6(instack_env['LOCAL_IP']) - - if CONF.docker_registry_mirror: - instack_env['DOCKER_REGISTRY_MIRROR'] = CONF.docker_registry_mirror - if CONF.docker_insecure_registries: - instack_env['DOCKER_INSECURE_REGISTRIES'] = json.dumps( - CONF.docker_insecure_registries) - else: - # For backward compatibility with previous defaults - instack_env['DOCKER_INSECURE_REGISTRIES'] = json.dumps( - [instack_env['LOCAL_IP'] + ':' + '8787', - CONF.undercloud_admin_host + ':' + '8787']) - - # We're not in a chroot so this doesn't make sense, and it causes weird - # errors if it's set. - if instack_env.get('DIB_YUM_REPO_CONF'): - del instack_env['DIB_YUM_REPO_CONF'] - - instack_env['TRIPLEO_INSTALL_USER'] = getpass.getuser() - instack_env['TRIPLEO_UNDERCLOUD_CONF_FILE'] = PATHS.CONF_PATH - instack_env['TRIPLEO_UNDERCLOUD_PASSWORD_FILE'] = PATHS.PASSWORD_PATH - - # Mustache conditional logic requires ENABLE_NOVAJOIN to be undefined - # when novajoin is not enabled. - if instack_env['ENABLE_NOVAJOIN'].lower() == 'false': - del instack_env['ENABLE_NOVAJOIN'] - - _generate_endpoints(instack_env) - - _write_password_file(instack_env) - - if instack_env['UNDERCLOUD_SERVICE_CERTIFICATE']: - raw_value = instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] - abs_cert = os.path.abspath(raw_value) - if abs_cert != raw_value: - home_dir = os.path.expanduser('~') - if os.getcwd() != home_dir and os.path.exists(abs_cert): - LOG.warning('Using undercloud_service_certificate from ' - 'current directory, please use an absolute path ' - 'to remove ambiguity') - instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] = abs_cert - else: - instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] = os.path.join( - home_dir, raw_value) - elif CONF.generate_service_certificate: - public_host = CONF.undercloud_public_host - instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] = ( - '/etc/pki/tls/certs/undercloud-%s.pem' % public_host) - - return instack_env - - -def _get_template_path(template): - local_template_path = os.path.join( - os.path.dirname(__file__), - '..', - 'templates', - template) - installed_template_path = os.path.join( - '/usr/share/instack-undercloud/templates', - template) - if os.path.exists(local_template_path): - return local_template_path - else: - return installed_template_path - - -def _generate_init_data(instack_env): - context = instack_env.copy() - - if CONF.hieradata_override: - data_file = CONF.hieradata_override - hiera_entry = os.path.splitext(os.path.basename(data_file))[0] - dst = os.path.join('/etc/puppet/hieradata', - os.path.basename(data_file)) - if os.path.abspath(CONF.hieradata_override) != data_file: - # If we don't have an absolute path, compute it - data_file = os.path.join(os.path.expanduser('~'), data_file) - - if not os.path.exists(data_file): - raise RuntimeError( - "Could not find hieradata_override file '%s'" % data_file) - - _run_command(['sudo', 'mkdir', '-p', '/etc/puppet/hieradata']) - _run_command(['sudo', 'cp', data_file, dst]) - _run_command(['sudo', 'chmod', '0644', dst]) - else: - hiera_entry = '' - - if CONF.net_config_override: - net_config_json = open(CONF.net_config_override).read() - else: - net_config_json = \ - open(_get_template_path('net-config.json.template')).read() - - context['HIERADATA_OVERRIDE'] = hiera_entry - context['UNDERCLOUD_NAMESERVERS'] = json.dumps( - CONF.undercloud_nameservers) - partials = {'net_config': net_config_json} - renderer = 
pystache.Renderer(partials=partials) - template = _get_template_path('config.json.template') - - with open(template) as f: - config_json = renderer.render(f.read(), context) - - config_json = config_json.replace('&quot;', '"') - cfn_path = '/var/lib/heat-cfntools/cfn-init-data' - tmp_json = tempfile.mkstemp()[1] - with open(tmp_json, 'w') as f: - print(config_json, file=f) - - if not os.path.exists(os.path.dirname(cfn_path)): - _run_command(['sudo', 'mkdir', '-p', os.path.dirname(cfn_path)]) - - _run_command(['sudo', 'mv', tmp_json, cfn_path]) - _run_command(['sudo', 'chmod', '0644', cfn_path]) - - -def _run_instack(instack_env): - args = ['sudo', '-E', 'instack', '-p', instack_env['ELEMENTS_PATH'], - '-j', instack_env['JSONFILE'], - ] - LOG.info('Running instack') - _run_live_command(args, instack_env, 'instack') - LOG.info('Instack completed successfully') - - -def _run_yum_clean_all(instack_env): - args = ['sudo', 'yum', 'clean', 'all'] - LOG.info('Running yum clean all') - _run_live_command(args, instack_env, 'yum-clean-all') - LOG.info('yum-clean-all completed successfully') - - -def _run_yum_update(instack_env): - args = ['sudo', 'yum', 'update', '-y'] - LOG.info('Running yum update') - _run_live_command(args, instack_env, 'yum-update') - LOG.info('yum-update completed successfully') - - -def _get_ovs_interfaces(): - interfaces = glob.glob('/etc/sysconfig/network-scripts/ifcfg-*') - pattern = "OVSIntPort" - ovs_interfaces = [] - for interface in interfaces: - with open(interface, "r") as text: - for line in text: - if re.findall(pattern, line): - # FIXME (holser). It might be better to get the interface - # from DEVICE rather than the name of the file. - ovs_interfaces.append(interface.split('-')[-1]) - return ovs_interfaces - - -def _run_restore_ovs_interfaces(interfaces): - for interface in interfaces: - LOG.info('Running restart OVS interface %s', interface) - _run_command(['sudo', 'ifup', interface]) - LOG.info('Restart OVS interface %s completed successfully', interface) - - -def _run_orc(instack_env): - args = ['sudo', 'os-refresh-config'] - LOG.info('Running os-refresh-config') - _run_live_command(args, instack_env, 'os-refresh-config') - LOG.info('os-refresh-config completed successfully') - - -def _extract_from_stackrc(name): - """Extract authentication values from stackrc - - :param name: The value to be extracted. For example: OS_USERNAME or - OS_AUTH_URL. - """ - with open(os.path.expanduser('~/stackrc')) as f: - for line in f: - if name in line: - parts = line.split('=') - return parts[1].rstrip() - - -def _ensure_user_identity(id_path): - if not os.path.isfile(id_path): - args = ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', id_path] - _run_command(args) - LOG.info('Generated new ssh key in ~/.ssh/id_rsa') - - -def _get_auth_values(): - """Get auth values from stackrc - - Returns the user, password, project and auth_url as read from stackrc, - in that order as a tuple. - """ - user = _extract_from_stackrc('OS_USERNAME') - password = _run_command(['sudo', 'hiera', 'admin_password']).rstrip() - project = _extract_from_stackrc('OS_PROJECT_NAME') - auth_url = _extract_from_stackrc('OS_AUTH_URL') - return user, password, project, auth_url - - -def _configure_ssh_keys(nova): - """Configure default ssh keypair in Nova - - Generates a new ssh key for the current user if one does not already - exist, then uploads that to Nova as the 'default' keypair.
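- 
-     Expected to be called with a novaclient instance, as in _post_config,
-     e.g.:
- 
-         nova = novaclient.Client(2, session=_get_session())
-         _configure_ssh_keys(nova)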
- """ - id_path = os.path.expanduser('~/.ssh/id_rsa') - _ensure_user_identity(id_path) - - try: - nova.keypairs.get('default') - except exceptions.NotFound: - with open(id_path + '.pub') as pubkey: - nova.keypairs.create('default', pubkey.read().rstrip()) - - -def _ensure_ssh_selinux_permission(): - ssh_path = os.path.expanduser('~/.ssh') - try: - enforcing = _run_command(['getenforce']) - if os.path.isdir(ssh_path): - if 'Enforcing' in enforcing: - file_perms = _run_command( - ['find', ssh_path, '-exec', 'ls', '-lZ', '{}', ';']) - wrong_perm = False - for line in file_perms.splitlines(): - if 'ssh_home_t' not in line: - wrong_perm = True - break - if wrong_perm: - cmd = ['sudo', 'semanage', - 'fcontext', '-a', '-t', 'ssh_home_t', - "{}(/.*)?".format(ssh_path)] - _run_command(cmd) - _run_command(['restorecon', '-R', ssh_path]) - except OSError as e: - if e.errno == os.errno.ENOENT: - LOG.debug("Not a SeLinux platform") - else: - raise - - -def _delete_default_flavors(nova): - """Delete the default flavors from Nova - - The m1.tiny, m1.small, etc. flavors are not useful on an undercloud. - """ - to_delete = ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'] - for f in nova.flavors.list(): - if f.name in to_delete: - nova.flavors.delete(f.id) - - -def _ensure_flavor(nova, existing, name, profile=None): - rc_key_name = 'resources:CUSTOM_%s' % DEFAULT_NODE_RESOURCE_CLASS.upper() - keys = { - # First, make it request the default resource class - rc_key_name: "1", - # Then disable scheduling based on everything else - "resources:DISK_GB": "0", - "resources:MEMORY_MB": "0", - "resources:VCPU": "0" - } - - if existing is None: - flavor = nova.flavors.create(name, 4096, 1, 40) - - keys['capabilities:boot_option'] = 'local' - if profile is not None: - keys['capabilities:profile'] = profile - flavor.set_keys(keys) - message = 'Created flavor "%s" with profile "%s"' - - LOG.info(message, name, profile) - else: - LOG.info('Not creating flavor "%s" because it already exists.', name) - - # NOTE(dtantsur): it is critical to ensure that the flavors request - # the correct resource class, otherwise scheduling will fail. - old_keys = existing.get_keys() - for key in old_keys: - if key.startswith('resources:CUSTOM_') and key != rc_key_name: - LOG.warning('Not updating flavor %s, as it already has a ' - 'custom resource class %s. Make sure you have ' - 'enough nodes with this resource class.', - existing.name, key) - return - - # Keep existing values - keys.update(old_keys) - existing.set_keys(keys) - LOG.info('Flavor %s updated to use custom resource class %s', - name, DEFAULT_NODE_RESOURCE_CLASS) - - -def _ensure_node_resource_classes(ironic): - for node in ironic.node.list(limit=0, fields=['uuid', 'resource_class']): - if node.resource_class: - if node.resource_class != DEFAULT_NODE_RESOURCE_CLASS: - LOG.warning('Node %s is using a resource class %s instead ' - 'of the default %s. 
Make sure you use the correct ' - 'flavor for it.', node.uuid, node.resource_class, - DEFAULT_NODE_RESOURCE_CLASS) - continue - - ironic.node.update(node.uuid, - [{'path': '/resource_class', 'op': 'add', - 'value': DEFAULT_NODE_RESOURCE_CLASS}]) - LOG.info('Node %s resource class was set to %s', - node.uuid, DEFAULT_NODE_RESOURCE_CLASS) - - -def _copy_stackrc(): - args = ['sudo', 'cp', '/root/stackrc', os.path.expanduser('~')] - try: - _run_command(args, name='Copy stackrc') - except subprocess.CalledProcessError: - LOG.info("/root/stackrc not found, this is OK on initial deploy") - args = ['sudo', 'chown', getpass.getuser() + ':', - os.path.expanduser('~/stackrc')] - _run_command(args, name='Chown stackrc') - - -def _clean_os_refresh_config(): - orc_dirs = glob.glob('/usr/libexec/os-refresh-config/*') - args = ['sudo', 'rm', '-rf'] + orc_dirs - _run_command(args, name='Clean os-refresh-config') - - -def _clean_os_collect_config(): - occ_dir = '/var/lib/os-collect-config' - args = ['sudo', 'rm', '-fr', occ_dir] - _run_command(args, name='Clean os-collect-config') - - -def _create_mistral_config_environment(instack_env, mistral): - # Store all the required passwords from the Undercloud - # in a Mistral environment so they can be accessed - # by the Mistral actions. - - config_data = { - 'undercloud_ceilometer_snmpd_password': - instack_env['UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD'], - 'undercloud_db_password': - instack_env['UNDERCLOUD_DB_PASSWORD'] - } - env_name = 'tripleo.undercloud-config' - try: - env_data = mistral.environments.get(env_name).variables - except (ks_exceptions.NotFound, mistralclient_exc.APIException): - # If the environment is not created, we need to - # create it with the information in config_data - mistral.environments.create( - name=env_name, - description='Undercloud configuration parameters', - variables=json.dumps(config_data, sort_keys=True) - ) - return - - # If we are upgrading from an environment without - # variables defined in config_data, we need to update - # the environment variables. 
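- # (Functionally a dict merge: config_data wins on conflicts, while keys
- # that only exist in env_data are preserved.)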
- - for var, value in iter(config_data.items()): - if var in env_data: - if env_data[var] != config_data[var]: - # Value in config_data is different - # need to update - env_data[var] = value - else: - # The value in config_data - # is new, we need to add it - env_data[var] = value - - # Here we update the current environment - # with the variables updated - mistral.environments.update( - name=env_name, - description='Undercloud configuration parameters', - variables=json.dumps(env_data, sort_keys=True) - ) - - -def _wait_for_mistral_execution(timeout_at, mistral, execution, message='', - fail_on_error=False): - while time.time() < timeout_at: - exe = mistral.executions.get(execution.id) - if exe.state == "RUNNING": - time.sleep(5) - continue - if exe.state == "SUCCESS": - return - else: - exe_out = "" - exe_created_at = time.strptime(exe.created_at, - "%Y-%m-%d %H:%M:%S") - ae_list = mistral.action_executions.list() - for ae in ae_list: - if ((ae.task_name == "run_validation") and - (ae.state == "ERROR") and - (time.strptime(ae.created_at, "%Y-%m-%d %H:%M:%S") > - exe_created_at)): - task = mistral.tasks.get(ae.task_execution_id) - task_res = task.to_dict().get('result') - exe_out = "%s %s" % (exe_out, task_res) - error_message = "ERROR %s %s Mistral execution ID: %s" % ( - message, exe_out, execution.id) - LOG.error(error_message) - if fail_on_error: - raise RuntimeError(error_message) - return - else: - exe = mistral.executions.get(execution.id) - error_message = ("TIMEOUT waiting for execution %s to finish. " - "State: %s" % (exe.id, exe.state)) - LOG.error(error_message) - if fail_on_error: - raise RuntimeError(error_message) - - -def _get_session(): - user, password, project, auth_url = _get_auth_values() - auth_kwargs = { - 'auth_url': auth_url, - 'username': user, - 'password': password, - 'project_name': project, - 'project_domain_name': 'Default', - 'user_domain_name': 'Default', - } - auth_plugin = ks_auth.Password(**auth_kwargs) - return session.Session(auth=auth_plugin) - - -def _run_validation_groups(groups=[], mistral_url='', timeout=540, - fail_on_error=False): - sess = _get_session() - mistral = mistralclient.client(mistral_url=mistral_url, session=sess) - LOG.info('Starting and waiting for validation groups %s ', groups) - execution = mistral.executions.create( - 'tripleo.validations.v1.run_groups', - workflow_input={'group_names': groups} - ) - fail_message = ("error running the validation groups %s " % groups) - timeout_at = time.time() + timeout - _wait_for_mistral_execution(timeout_at, mistral, execution, fail_message, - fail_on_error) - - -def _create_default_plan(mistral, plans, timeout=360): - plan_name = 'overcloud' - - if plan_name in plans: - LOG.info('Not creating default plan "%s" because it already exists.', - plan_name) - return - - execution = mistral.executions.create( - 'tripleo.plan_management.v1.create_deployment_plan', - workflow_input={ - 'container': plan_name, - 'use_default_templates': True, - } - ) - timeout_at = time.time() + timeout - fail_message = ("error creating the default Deployment Plan %s " - "Check the create_default_deployment_plan execution " - "in Mistral with openstack workflow execution list " % - plan_name) - _wait_for_mistral_execution(timeout_at, mistral, execution, fail_message, - fail_on_error=True) - - -def _upload_validations_to_swift(mistral, timeout=60): - LOG.info('Uploading default validations to Swift') - execution = mistral.executions.create( - 'tripleo.validations.v1.upload_validations') - fail_message = "error 
uploading default validations to Swift" - timeout_at = time.time() + timeout - _wait_for_mistral_execution(timeout_at, mistral, execution, fail_message, - fail_on_error=True) - - -def _prepare_ssh_environment(mistral): - mistral.executions.create('tripleo.validations.v1.copy_ssh_key') - - -def _create_logging_cron(mistral): - LOG.info('Configuring an hourly cron trigger for tripleo-ui logging') - mistral.cron_triggers.create( - 'publish-ui-logs-hourly', - 'tripleo.plan_management.v1.publish_ui_logs_to_swift', - pattern='0 * * * *' - ) - - -def _post_config_mistral(instack_env, mistral, swift): - LOG.info('Configuring Mistral workbooks') - - for workbook in [w for w in mistral.workbooks.list() - if w.name.startswith('tripleo')]: - mistral.workbooks.delete(workbook.name) - - managed_tag = 'tripleo-common-managed' - - all_workflows = mistral.workflows.list() - workflows_delete = [w.name for w in all_workflows - if managed_tag in w.tags] - - # in order to delete workflows they should have no triggers associated - for trigger in [t for t in mistral.cron_triggers.list() - if t.workflow_name in workflows_delete]: - mistral.cron_triggers.delete(trigger.name) - - for workflow_name in workflows_delete: - mistral.workflows.delete(workflow_name) - - for workbook in [f for f in os.listdir(PATHS.WORKBOOK_PATH) - if os.path.isfile(os.path.join(PATHS.WORKBOOK_PATH, f))]: - mistral.workbooks.create(os.path.join(PATHS.WORKBOOK_PATH, workbook)) - LOG.info('Mistral workbooks configured successfully') - - plans = [container["name"] for container in swift.get_account()[1]] - - _create_mistral_config_environment(instack_env, mistral) - _create_default_plan(mistral, plans) - _create_logging_cron(mistral) - - if CONF.enable_validations: - _prepare_ssh_environment(mistral) - _upload_validations_to_swift(mistral) - - -def _migrate_to_convergence(heat): - """Migrate all active stacks to use the convergence engine - - This appears to be a noop if the stack has already been migrated, so it - should be safe to run unconditionally. 
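- 
-     Per stack this amounts to running
-     'sudo -E heat-manage migrate_convergence_1 <stack-id>' by hand.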
- - :param heat: A heat client instance - """ - for stack in heat.stacks.list(): - LOG.info('Migrating stack "%s" to convergence engine', stack.id) - args = ['sudo', '-E', 'heat-manage', 'migrate_convergence_1', stack.id] - _run_command(args, name='heat-manage') - LOG.info('Finished migrating stack "%s"', stack.id) - - -def _post_config(instack_env, upgrade): - _copy_stackrc() - user, password, project, auth_url = _get_auth_values() - sess = _get_session() - nova = novaclient.Client(2, session=sess) - - ironic = ir_client.get_client(1, session=sess, - os_ironic_api_version='1.21') - - sdk = os_client_config.make_sdk(auth_url=auth_url, - project_name=project, - username=user, - password=password, - project_domain_name='Default', - user_domain_name='Default') - - network = _ensure_neutron_network(sdk) - _config_neutron_segments_and_subnets(sdk, network.id) - - _configure_ssh_keys(nova) - _ensure_ssh_selinux_permission() - _delete_default_flavors(nova) - - _ensure_node_resource_classes(ironic) - - all_flavors = {f.name: f for f in nova.flavors.list()} - for name, profile in [('baremetal', None), - ('control', 'control'), - ('compute', 'compute'), - ('ceph-storage', 'ceph-storage'), - ('block-storage', 'block-storage'), - ('swift-storage', 'swift-storage')]: - _ensure_flavor(nova, all_flavors.get(name), name, profile) - - mistral_url = instack_env['UNDERCLOUD_ENDPOINT_MISTRAL_PUBLIC'] - mistral = mistralclient.client( - mistral_url=mistral_url, - session=sess) - swift = swiftclient.Connection( - authurl=auth_url, - session=sess - ) - _post_config_mistral(instack_env, mistral, swift) - _member_role_exists() - - # NOTE(bnemec): We are turning on the convergence engine in Queens, so we - # need to migrate all existing stacks on upgrade. This functionality can - # be removed in Rocky as all stacks should have been migrated by then. - if upgrade: - heat = os_client_config.make_client('orchestration', - auth_url=auth_url, - username=user, - password=password, - project_name=project, - project_domain_name='Default', - user_domain_name='Default') - _migrate_to_convergence(heat) - - -def _ensure_neutron_network(sdk): - try: - network = list(sdk.network.networks(name=PHYSICAL_NETWORK)) - if not network: - mtu = CONF.get("local_mtu") - network = sdk.network.create_network( - name=PHYSICAL_NETWORK, provider_network_type='flat', - provider_physical_network=PHYSICAL_NETWORK, mtu=mtu) - LOG.info("Network created %s", network) - # (hjensas) Delete the default segment, we create a new segment - # per subnet later. 
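- # (Neutron creates an implicit segment together with the network, so the
- # first entry returned here is that default segment.)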
- segments = list(sdk.network.segments(network_id=network.id)) - sdk.network.delete_segment(segments[0].id) - LOG.info("Default segment on network %s deleted.", network.name) - else: - LOG.info("Not creating %s network, because it already exists.", - PHYSICAL_NETWORK) - network = network[0] - except Exception as e: - LOG.info("Network create/update failed %s", e) - raise - - return network - - -def _neutron_subnet_create(sdk, network_id, cidr, gateway, host_routes, - allocation_pool, name, segment_id): - try: - # If DHCP_START contains a ':', assume an IPv6 subnet - if ':' in allocation_pool[0]['start']: - subnet = sdk.network.create_subnet( - name=name, - cidr=cidr, - gateway_ip=gateway, - enable_dhcp=True, - ip_version='6', - ipv6_address_mode='dhcpv6-stateless', - ipv6_ra_mode='dhcpv6-stateless', - allocation_pools=allocation_pool, - network_id=network_id, - segment_id=segment_id) - else: - subnet = sdk.network.create_subnet( - name=name, - cidr=cidr, - gateway_ip=gateway, - host_routes=host_routes, - enable_dhcp=True, - ip_version='4', - allocation_pools=allocation_pool, - network_id=network_id, - segment_id=segment_id) - LOG.info("Subnet created %s", subnet) - except Exception as e: - LOG.error("Create subnet %s failed: %s", name, e) - raise - - return subnet - - -def _neutron_subnet_update(sdk, subnet_id, gateway, host_routes, - allocation_pool, name): - update_values = {'name': name, 'gateway_ip': gateway, - 'host_routes': host_routes, - 'allocation_pools': allocation_pool} - try: - # If DHCP_START contains a ':', assume an IPv6 subnet - if ':' in allocation_pool[0]['start']: - del update_values['host_routes'] - subnet = sdk.network.update_subnet(subnet_id, **update_values) - LOG.info("Subnet updated %s", subnet) - except Exception as e: - LOG.error("Update subnet %s failed: %s", name, e) - raise - - -def _neutron_segment_create(sdk, name, network_id, phynet): - try: - segment = sdk.network.create_segment( - name=name, - network_id=network_id, - physical_network=phynet, - network_type='flat') - LOG.info("Neutron Segment created %s", segment) - except Exception as e: - LOG.info("Neutron Segment %s create failed %s", name, e) - raise - - return segment - - -def _neutron_segment_update(sdk, segment_id, name): - try: - segment = sdk.network.update_segment(segment_id, name=name) - LOG.info("Neutron Segment updated %s", segment) - except Exception as e: - LOG.info("Neutron Segment %s update failed %s", name, e) - raise - - -def _ensure_neutron_router(sdk, name, subnet_id): - try: - router = sdk.network.create_router(name=name, admin_state_up='true') - sdk.network.add_interface_to_router(router.id, subnet_id=subnet_id) - except Exception as e: - LOG.error("Create router for subnet %s failed: %s", name, e) - raise - - -def _get_subnet(sdk, cidr, network_id): - try: - subnet = list(sdk.network.subnets(cidr=cidr, network_id=network_id)) - except Exception: - raise - - return False if not subnet else subnet[0] - - -def _get_segment(sdk, phy, network_id): - try: - segment = list(sdk.network.segments(physical_network=phy, - network_id=network_id)) - except Exception: - raise - - return False if not segment else segment[0] - - -def _config_neutron_segments_and_subnets(sdk, ctlplane_id): - s = CONF.get(CONF.local_subnet) - subnet = _get_subnet(sdk, s.cidr, ctlplane_id) - if subnet and not subnet.segment_id: - LOG.warning("Local subnet %s already exists and is not associated " - "with a network segment. 
Any additional subnets will " - "be ignored.", CONF.local_subnet) - host_routes = [{'destination': '169.254.169.254/32', - 'nexthop': str(netaddr.IPNetwork(CONF.local_ip).ip)}] - allocation_pool = [{'start': s.dhcp_start, 'end': s.dhcp_end}] - _neutron_subnet_update(sdk, subnet.id, s.gateway, host_routes, - allocation_pool, CONF.local_subnet) - # If the subnet is IPv6 we need to start a router so that router - # advertisements are sent out for stateless IP addressing to work. - if ':' in s.dhcp_start: - _ensure_neutron_router(sdk, CONF.local_subnet, subnet.id) - else: - for name in CONF.subnets: - s = CONF.get(name) - - phynet = name - metadata_nexthop = s.gateway - if name == CONF.local_subnet: - phynet = PHYSICAL_NETWORK - metadata_nexthop = str(netaddr.IPNetwork(CONF.local_ip).ip) - - host_routes = [{'destination': '169.254.169.254/32', - 'nexthop': metadata_nexthop}] - allocation_pool = [{'start': s.dhcp_start, 'end': s.dhcp_end}] - - subnet = _get_subnet(sdk, s.cidr, ctlplane_id) - segment = _get_segment(sdk, phynet, ctlplane_id) - - if name == CONF.local_subnet: - if ((subnet and not segment) or - (subnet and segment and subnet.segment_id != segment.id)): - LOG.error( - 'The cidr: %s of the local subnet is already used in ' - 'subnet: %s associated with segment_id: %s.' % - (s.cidr, subnet.id, subnet.segment_id)) - raise RuntimeError('Local subnet cidr already associated.') - - if subnet: - _neutron_segment_update(sdk, subnet.segment_id, name) - _neutron_subnet_update(sdk, subnet.id, s.gateway, host_routes, - allocation_pool, name) - else: - if segment: - _neutron_segment_update(sdk, segment.id, name) - else: - segment = _neutron_segment_create(sdk, name, - ctlplane_id, phynet) - if CONF.enable_routed_networks: - subnet = _neutron_subnet_create(sdk, ctlplane_id, s.cidr, - s.gateway, host_routes, - allocation_pool, name, - segment.id) - elif name == CONF.local_subnet: - # Create subnet with segment_id: None if routed networks - # is not enabled. - # TODO(hjensas): Deprecate option and remove this once - # tripleo-ui can support managing baremetal port - # attributes. - subnet = _neutron_subnet_create(sdk, ctlplane_id, s.cidr, - s.gateway, host_routes, - allocation_pool, name, - None) - - # If the subnet is IPv6 we need to start a router so that router - # advertisements are sent out for stateless IP addressing to work. - if ':' in s.dhcp_start: - _ensure_neutron_router(sdk, name, subnet.id) - - -def _handle_upgrade_fact(upgrade=False): - """Create an upgrade fact for use in puppet - - Since we don't run different puppets depending on whether it's an upgrade - or not, we need to be able to pass a flag into puppet to let it know if - we're doing an upgrade. This is helpful when trying to handle state - transitions from an already installed undercloud. This function creates - a static fact named undercloud_upgrade only after the install has occurred. - When invoked with upgrade=True, the $::undercloud_upgrade fact should - be set to true. - - :param upgrade: Boolean indicating if this is an upgrade action or not - """ - - fact_string = 'undercloud_upgrade={}'.format(upgrade) - fact_path = '/etc/facter/facts.d/undercloud_upgrade.txt' - if not os.path.exists(os.path.dirname(fact_path)) and upgrade: - _run_command(['sudo', 'mkdir', '-p', os.path.dirname(fact_path)]) - - # We only need to ensure the fact is correct when we've already installed - # the undercloud.
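- # facter reads key=value pairs from *.txt files under /etc/facter/facts.d/,
- # which is how $::undercloud_upgrade becomes visible to puppet.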
- if os.path.exists(os.path.dirname(fact_path)): - tmp_fact = tempfile.mkstemp()[1] - with open(tmp_fact, 'w') as f: - f.write(fact_string.lower()) - _run_command(['sudo', 'mv', tmp_fact, fact_path]) - _run_command(['sudo', 'chmod', '0644', fact_path]) - - -def install(instack_root, upgrade=False): - """Install the undercloud - - :param instack_root: The path containing the instack-undercloud elements - and json files. - :param upgrade: Boolean indicating whether this is an upgrade rather - than a fresh install. - """ - undercloud_operation = "upgrade" if upgrade else "install" - try: - _configure_logging(DEFAULT_LOG_LEVEL, PATHS.LOG_FILE) - LOG.info('Logging to %s', PATHS.LOG_FILE) - _load_config() - _load_subnets_config_groups() - _clean_os_refresh_config() - _clean_os_collect_config() - _validate_configuration() - instack_env = _generate_environment(instack_root) - _generate_init_data(instack_env) - ovs_interfaces = _get_ovs_interfaces() - if CONF.undercloud_update_packages: - _run_yum_clean_all(instack_env) - if ovs_interfaces: - _run_restore_ovs_interfaces(ovs_interfaces) - _run_yum_update(instack_env) - _handle_upgrade_fact(upgrade) - _run_instack(instack_env) - _run_orc(instack_env) - # FIXME (holser). The root cause of the issue is in OVS flow - # restoration. Once 'systemctl reload openvswitch' is fixed, OVS port - # restoration can be removed. - if ovs_interfaces: - _run_restore_ovs_interfaces(ovs_interfaces) - _post_config(instack_env, upgrade) - _run_command(['sudo', 'rm', '-f', '/tmp/svc-map-services'], None, 'rm') - if upgrade and CONF.enable_validations: # Run post-upgrade validations - mistral_url = instack_env['UNDERCLOUD_ENDPOINT_MISTRAL_PUBLIC'] - _run_validation_groups(["post-upgrade"], mistral_url) - except Exception as e: - LOG.debug("An exception occurred", exc_info=True) - LOG.error(FAILURE_MESSAGE, - {'undercloud_operation': undercloud_operation, - 'exception': six.text_type(e), - 'log_file': PATHS.LOG_FILE}) - if CONF.undercloud_debug: - raise - sys.exit(1) - else: - LOG.info(COMPLETION_MESSAGE, - {'undercloud_operation': undercloud_operation, - 'password_path': PATHS.PASSWORD_PATH, - 'stackrc_path': os.path.expanduser('~/stackrc')}) - - -def _is_database_upgrade_needed(): - """Check whether a yum update will cause a major version update - for the database.
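- 
-     For example, 5.5.56 -> 10.1.20 counts as a major version change
-     (and will require mysql_upgrade), while 10.1.18 -> 10.1.20 does not;
-     only the first two version components are compared.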
- - :return whether an update will happen - :rtype bool - """ - need_upgrade = False - try: - args = ['sudo', 'rpm', '--query', - '--queryformat', '%{VERSION}', 'mariadb-server'] - installed = subprocess.check_output(args).strip() - LOG.info('Current mariadb version is: %s' % installed) - args = ['sudo', 'repoquery', '--pkgnarrow=updates', - '--queryformat', '%{VERSION}', 'mariadb-server'] - available = subprocess.check_output(args).strip() - LOG.info('Available mariadb version is: %s' % - (available or "(no new version available)")) - if available: - # compare major versions majorX.majorY.minor - major_installed, major_available = \ - [re.sub(r'^(\d+\.\d+)\..*', r'\1', ver) for - ver in [installed, available]] - - if not major_installed or not major_available: - raise RuntimeError('Could not determine mariadb versions ' - '(installed:"%s", available:"%s")' % - (major_installed, major_available)) - - need_upgrade = (major_available != major_installed) - if need_upgrade: - LOG.info('Major versions differ (%s vs %s), ' - 'database needs an upgrade' % - (major_installed, major_available)) - except subprocess.CalledProcessError: - LOG.error('Could not determine if mariadb will be updated') - raise - return need_upgrade - - -def pre_upgrade(): - _configure_logging(DEFAULT_LOG_LEVEL, PATHS.LOG_FILE) - - # Don't upgrade the undercloud unless every overcloud stack is in a - # *_COMPLETE state: we migrate overcloud stacks to convergence in - # _post_config, and that would fail otherwise. Better to fail fast. - user, password, project, auth_url = _get_auth_values() - heat = os_client_config.make_client('orchestration', - auth_url=auth_url, - username=user, - password=password, - project_name=project, - project_domain_name='Default', - user_domain_name='Default') - for stack in heat.stacks.list(): - if stack.status != 'COMPLETE': - LOG.error('Cannot upgrade the undercloud while an overcloud ' - 'stack is not in a *_COMPLETE state') - sys.exit(1) - - args = ['sudo', 'systemctl', 'stop', 'openstack-*', 'neutron-*', - 'openvswitch', 'httpd'] - LOG.info('Stopping OpenStack and related services') - _run_live_command(args, name='systemctl stop') - LOG.info('Services stopped successfully') - - # Ensure nova data migrations are complete before upgrading packages - LOG.info('Running Nova online data migration') - _run_command(['sudo', '-E', '/usr/bin/nova-manage', 'db', - 'online_data_migrations']) - LOG.info('Nova online data migration completed') - - args = ['sudo', 'yum', 'install', '-y', 'ansible-pacemaker'] - LOG.info('Installing Ansible Pacemaker module') - _run_live_command(args, name='install ansible') - LOG.info('Ansible pacemaker install completed successfully') - - # Check whether a major version upgrade is pending for the database - mariadb_upgrade = _is_database_upgrade_needed() - - if mariadb_upgrade: - args = ['sudo', 'systemctl', 'stop', 'mariadb'] - LOG.info('Stopping OpenStack database before upgrade') - _run_live_command(args, name='systemctl stop mariadb') - LOG.info('Database stopped successfully') - - args = ['sudo', 'yum', 'update', '-y'] - LOG.info('Updating full system') - _run_live_command(args, name='yum update') - LOG.info('Update completed successfully') - - if mariadb_upgrade: - args = ['sudo', 'systemctl', 'start', 'mariadb'] - LOG.info('Start OpenStack database after upgrade') - _run_live_command(args, name='systemctl start mariadb') - LOG.info('Database started successfully') - - args = ['sudo', 'mysql_upgrade'] - LOG.info('Run online upgrade of the database') - _run_live_command(args, name='mysql_upgrade') - LOG.info('Database 
upgraded successfully') - - args = ['sudo', 'systemctl', 'restart', 'mariadb'] - LOG.info('Restart OpenStack database for upgrade to take effect') - _run_live_command(args, name='systemctl restart mariadb') - LOG.info('Database restarted successfully') diff --git a/instack_undercloud/validator.py b/instack_undercloud/validator.py deleted file mode 100644 index e66f74081..000000000 --- a/instack_undercloud/validator.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2015 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr -import netifaces -import six - - -SUPPORTED_ARCHITECTURES = ['ppc64le'] - - -class FailedValidation(Exception): - pass - - -def validate_config(params, error_callback): - """Validate an undercloud configuration described by params - - :param params: A dict containing all of the undercloud.conf option - names mapped to their proposed values. - :param error_callback: A callback function that should be used to handle - errors. The function must accept a single parameter, which will be - a string describing the error. - """ - local_params = dict(params) - _validate_value_formats(local_params, error_callback) - _validate_in_cidr(local_params, error_callback) - _validate_dhcp_range(local_params, error_callback) - _validate_inspection_range(local_params, error_callback) - _validate_no_overlap(local_params, error_callback) - _validate_ips(local_params, error_callback) - _validate_interface_exists(local_params, error_callback) - - -def _validate_ppc64le_exclusive_opts(params, error_callback): - if 'ppc64le' in params['additional_architectures']: - if 'ipxe_enabled' in params and params['ipxe_enabled']: - error_callback('Currently iPXE boot isn\'t supported with ' - 'ppc64le systems but is enabled') - - -def _validate_additional_architectures(params, error_callback): - for arch in params['additional_architectures']: - if arch not in SUPPORTED_ARCHITECTURES: - error_callback('%s "%s" must be a supported architecture: %s' % - ('additional_architectures', arch, - ' '.join(SUPPORTED_ARCHITECTURES))) - - -def _validate_ips(params, error_callback): - def is_ip(value, param_name): - try: - netaddr.IPAddress(value) - except netaddr.core.AddrFormatError: - error_callback( - '%s "%s" must be a valid IP address' % (param_name, value)) - for ip in params['undercloud_nameservers']: - is_ip(ip, 'undercloud_nameservers') - - -def _validate_value_formats(params, error_callback): - """Validate format of some values - - Certain values have a specific format that must be maintained in order to - work properly. For example, local_ip must be in CIDR form, and the - hostname must be a FQDN. - """ - for param in ('local_ip', 'cidr'): - if param in params: - try: - ip_net = netaddr.IPNetwork(params[param]) - if (ip_net.prefixlen == 32) or (ip_net.prefixlen == 0): - message = ('"%s" "%s" not valid: Invalid netmask.' 
-                               (param, params[param]))
-                    error_callback(message)
-                # If IPv6 the ctlplane network uses the EUI-64 address format,
-                # which requires the prefix to be /64
-                if ip_net.version == 6 and ip_net.prefixlen != 64:
-                    message = ('"%s" "%s" not valid: '
-                               'Prefix must be 64 for IPv6.' %
-                               (param, params[param]))
-                    error_callback(message)
-            except netaddr.core.AddrFormatError as e:
-                message = ('"%s" "%s" not valid: "%s" '
-                           'Value must be in CIDR format.' %
-                           (param, params[param], str(e)))
-                error_callback(message)
-            except TypeError as e:
-                message = ('"%s" "%s" invalid type: "%s" ' %
-                           (param, params[param], str(e)))
-                error_callback(message)
-    if 'undercloud_hostname' in params:
-        hostname = params['undercloud_hostname']
-        if hostname is not None and '.' not in hostname:
-            message = 'Hostname "%s" is not fully qualified.' % hostname
-            error_callback(message)
-
-
-def _validate_in_cidr(params, error_callback):
-    cidr = netaddr.IPNetwork(params['cidr'])
-
-    def validate_addr_in_cidr(params, name, pretty_name=None,
-                              require_ip=True):
-        try:
-            if netaddr.IPAddress(params[name]) not in cidr:
-                message = ('%s "%s" not in defined CIDR "%s"' %
-                           (pretty_name or name, params[name], cidr))
-                error_callback(message)
-        except netaddr.core.AddrFormatError:
-            if require_ip:
-                message = 'Invalid IP address: %s' % params[name]
-                error_callback(message)
-
-    # NOTE(hjensas): Only check certs etc if not validating routed subnets
-    if 'local_ip' in params:
-        params['just_local_ip'] = params['local_ip'].split('/')[0]
-        validate_addr_in_cidr(params, 'just_local_ip', 'local_ip')
-        # NOTE(bnemec): The ui needs to be externally accessible, which means
-        # in many cases we can't have the public vip on the provisioning
-        # network. In that case users are on their own to ensure they've
-        # picked valid values for the VIP hosts.
-        if ((params['undercloud_service_certificate'] or
-             params['generate_service_certificate']) and
-                not params['enable_ui']):
-            validate_addr_in_cidr(params, 'undercloud_public_host',
-                                  require_ip=False)
-            validate_addr_in_cidr(params, 'undercloud_admin_host',
-                                  require_ip=False)
-    # undercloud.conf uses inspection_iprange, the configuration wizard
-    # tool passes the values separately.
-    if 'inspection_iprange' in params:
-        inspection_iprange = params['inspection_iprange'].split(',')
-        params['inspection_start'] = inspection_iprange[0]
-        params['inspection_end'] = inspection_iprange[1]
-    validate_addr_in_cidr(params, 'gateway')
-    validate_addr_in_cidr(params, 'dhcp_start')
-    validate_addr_in_cidr(params, 'dhcp_end')
-    validate_addr_in_cidr(params, 'inspection_start', 'Inspection range start')
-    validate_addr_in_cidr(params, 'inspection_end', 'Inspection range end')
-
-
-def _validate_dhcp_range(params, error_callback):
-    dhcp_start = netaddr.IPAddress(params['dhcp_start'])
-    dhcp_end = netaddr.IPAddress(params['dhcp_end'])
-    if dhcp_start >= dhcp_end:
-        message = ('Invalid dhcp range specified, dhcp_start "%s" does '
-                   'not come before dhcp_end "%s"' %
-                   (dhcp_start, dhcp_end))
-        error_callback(message)
-
-
-def _validate_inspection_range(params, error_callback):
-    inspection_start = netaddr.IPAddress(params['inspection_start'])
-    inspection_end = netaddr.IPAddress(params['inspection_end'])
-    if inspection_start >= inspection_end:
-        message = ('Invalid inspection range specified, inspection_start '
-                   '"%s" does not come before inspection_end "%s"' %
-                   (inspection_start, inspection_end))
-        error_callback(message)
-
-
-def _validate_no_overlap(params, error_callback):
-    """Validate the provisioning and inspection ip ranges do not overlap"""
-    dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],
-                                             params['dhcp_end']))
-    inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],
                                                    params['inspection_end']))
-    # If there is any intersection of the two sets then we have a problem
-    if dhcp_set & inspection_set:
-        message = ('Inspection DHCP range "%s-%s" overlaps provisioning '
-                   'DHCP range "%s-%s".' %
-                   (params['inspection_start'], params['inspection_end'],
-                    params['dhcp_start'], params['dhcp_end']))
-        error_callback(message)
-
-
-def _validate_interface_exists(params, error_callback):
-    """Validate the provided local interface exists"""
-    local_interface = params['local_interface']
-    net_override = params['net_config_override']
-    if not net_override and local_interface not in netifaces.interfaces():
-        message = ('Invalid local_interface specified. %s is not available.' %
-                   local_interface)
-        error_callback(message)
-
-
-def _validate_no_missing_subnet_param(name, params, error_callback):
-    if None in six.viewvalues(params):
-        missing = [k for k, v in six.iteritems(params) if not v]
-        message = 'subnet %s. 
Missing option(s): %s' % (name, missing) - error_callback(message) - - -def validate_subnet(name, params, error_callback): - local_params = dict(params) - _validate_no_missing_subnet_param(name, params, error_callback) - _validate_value_formats(local_params, error_callback) - _validate_in_cidr(local_params, error_callback) - _validate_dhcp_range(local_params, error_callback) - _validate_inspection_range(local_params, error_callback) - _validate_no_overlap(local_params, error_callback) diff --git a/json-files/centos-7-undercloud-packages.json b/json-files/centos-7-undercloud-packages.json deleted file mode 100644 index 652d0bbf1..000000000 --- a/json-files/centos-7-undercloud-packages.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "name": "Installation", - "element": [ - "install-types", - "undercloud-install", - "enable-packages-install", - "element-manifest", - "puppet-stack-config" - ], - "hook": [ - "extra-data", - "pre-install", - "install", - "post-install" - ], - "exclude-element": [ - "pip-and-virtualenv", - "epel", - "os-collect-config", - "svc-map", - "pip-manifest", - "package-installs", - "pkg-map", - "puppet", - "cache-url", - "dib-python", - "os-svc-install", - "install-bin" - ], - "blacklist": [ - "99-refresh-completed" - ] - } -] diff --git a/json-files/rhel-7-undercloud-packages.json b/json-files/rhel-7-undercloud-packages.json deleted file mode 100644 index 652d0bbf1..000000000 --- a/json-files/rhel-7-undercloud-packages.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "name": "Installation", - "element": [ - "install-types", - "undercloud-install", - "enable-packages-install", - "element-manifest", - "puppet-stack-config" - ], - "hook": [ - "extra-data", - "pre-install", - "install", - "post-install" - ], - "exclude-element": [ - "pip-and-virtualenv", - "epel", - "os-collect-config", - "svc-map", - "pip-manifest", - "package-installs", - "pkg-map", - "puppet", - "cache-url", - "dib-python", - "os-svc-install", - "install-bin" - ], - "blacklist": [ - "99-refresh-completed" - ] - } -] diff --git a/lower-constraints.txt b/lower-constraints.txt deleted file mode 100644 index e56462d36..000000000 --- a/lower-constraints.txt +++ /dev/null @@ -1,84 +0,0 @@ -alabaster==0.7.10 -anyjson==0.3.3 -appdirs==1.3.0 -Babel==2.3.4 -bashate==0.5.1 -cliff==2.8.0 -cmd2==0.8.0 -coverage==4.0 -debtcollector==1.2.0 -decorator==3.4.0 -deprecation==1.0 -dib-utils==0.0.8 -docutils==0.11 -dogpile.cache==0.6.2 -dulwich==0.15.0 -extras==1.0.0 -fixtures==3.0.0 -flake8==2.2.4 -hacking==0.10.3 -imagesize==0.7.1 -iso8601==0.1.11 -Jinja2==2.10 -jmespath==0.9.0 -jsonpatch==1.16 -jsonpointer==1.13 -jsonschema==2.6.0 -keystoneauth1==3.4.0 -linecache2==1.0.0 -MarkupSafe==1.0 -mccabe==0.2.1 -mock==2.0.0 -monotonic==0.6 -mox3==0.20.0 -msgpack-python==0.4.0 -munch==2.1.0 -netaddr==0.7.18 -netifaces==0.10.4 -openstackdocstheme==1.18.1 -openstacksdk==0.11.2 -os-apply-config==5.0.0 -os-client-config==1.28.0 -os-refresh-config==6.0.0 -os-service-types==1.2.0 -osc-lib==1.8.0 -oslo.config==5.2.0 -oslo.i18n==3.15.3 -oslo.serialization==2.18.0 -oslo.utils==3.33.0 -oslotest==3.2.0 -pbr==2.0.0 -pep8==1.5.7 -positional==1.2.1 -prettytable==0.7.2 -psutil==3.2.2 -pyflakes==0.8.1 -Pygments==2.2.0 -pyparsing==2.1.0 -pyperclip==1.5.27 -pystache==0.5.4 -python-ironicclient==2.2.0 -python-keystoneclient==3.8.0 -python-mimeparse==1.6.0 -python-mistralclient==3.1.0 -python-novaclient==9.1.0 -python-subunit==1.0.0 -python-swiftclient==3.2.0 -pytz==2013.6 -PyYAML==3.12 -reno==2.5.0 -requests==2.14.2 -requestsexceptions==1.2.0 
-rfc3986==0.3.1
-simplejson==3.5.1
-six==1.10.0
-snowballstemmer==1.2.1
-Sphinx==1.6.5
-sphinxcontrib-websupport==1.0.1
-stevedore==1.20.0
-testrepository==0.0.18
-testscenarios==0.4
-testtools==2.2.0
-traceback2==1.4.0
-unittest2==1.1.0
-wrapt==1.7.0
diff --git a/releasenotes/notes/6.0.0-7413b6a7cecc00b6.yaml b/releasenotes/notes/6.0.0-7413b6a7cecc00b6.yaml
deleted file mode 100644
index 0bfb0dbc5..000000000
--- a/releasenotes/notes/6.0.0-7413b6a7cecc00b6.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-prelude: >
-  6.0.0 is the final release for Ocata.
-  It's the first release where release notes are added.
-features:
-  - Support for gnocchi service on undercloud to provide metrics support in
-    Telemetry. This will only be enabled when enable_telemetry is true.
-  - Support for panko service on undercloud to provide events support in
-    Telemetry. This will only be enabled when enable_telemetry is true.
-  - Remove Glance Registry from undercloud. It also means Glance API v1 won't
-    be available anymore.
-  - Validate vips when generating certificate.
-  - Improve upgrade process to include upgrade flag. This flag will be used
-    by the Puppet manifest to know when an upgrade happens.
-  - Deploy Nova Placement API service.
-  - Novajoin service support.
-  - Run `yum update -y` before Puppet run.
-  - Optional Cinder support for undercloud.
-  - When Cinder is enabled, deploy both v2 and v3 APIs.
-  - Aodh is now configured by default to use its own mysql backend.
-deprecations:
-  - Ceilometer API is officially deprecated. The service is still enabled
-    when enable_telemetry is true. This can be disabled using the
-    enable_legacy_ceilometer_api option in undercloud.conf. Users should
-    start migrating to aodh, gnocchi and panko in the future.
diff --git a/releasenotes/notes/Configure-auth-and-authtoken-for-novajoin-0cadd15e79b54c47.yaml b/releasenotes/notes/Configure-auth-and-authtoken-for-novajoin-0cadd15e79b54c47.yaml
deleted file mode 100644
index 4777ffd39..000000000
--- a/releasenotes/notes/Configure-auth-and-authtoken-for-novajoin-0cadd15e79b54c47.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - The undercloud installation now adds a keystone user and configures the
-    authtoken middleware for novajoin.
diff --git a/releasenotes/notes/TLS-by-default-bc12660c12ba7ab1.yaml b/releasenotes/notes/TLS-by-default-bc12660c12ba7ab1.yaml
deleted file mode 100644
index 19ff88828..000000000
--- a/releasenotes/notes/TLS-by-default-bc12660c12ba7ab1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-security:
-  - |
-    TLS is now used by default for the public endpoints. This is done through
-    the generate_service_certificate option, which now defaults to 'True'.
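As a quick aside on the TLS-by-default note above, a handshake-only probe can confirm that a public endpoint really serves TLS after the change. This is a minimal sketch, not instack-undercloud code; the host and port values are placeholder assumptions::

    # Hedged example: confirm that a TLS handshake completes against a
    # public endpoint. Host/port are placeholders, not documented defaults.
    import socket
    import ssl

    def endpoint_uses_tls(host, port):
        context = ssl.create_default_context()
        # The autogenerated certificate may be self-signed, so skip chain
        # verification and only check that the handshake succeeds.
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        sock = socket.create_connection((host, port), timeout=5)
        try:
            tls = context.wrap_socket(sock, server_hostname=host)
            try:
                return tls.version() is not None
            finally:
                tls.close()
        finally:
            sock.close()

    print(endpoint_uses_tls('192.168.24.2', 13000))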
diff --git a/releasenotes/notes/add-additional-endpoints-96cb28a13c79e9d9.yaml b/releasenotes/notes/add-additional-endpoints-96cb28a13c79e9d9.yaml
deleted file mode 100644
index 9aed9bf21..000000000
--- a/releasenotes/notes/add-additional-endpoints-96cb28a13c79e9d9.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Add additional endpoints to hieradata, which are used in the tripleo::ui
-    class to facilitate proxying of API endpoints via Apache's mod_rewrite
diff --git a/releasenotes/notes/add-certificate-ekus-13e92513c562f0dc.yaml b/releasenotes/notes/add-certificate-ekus-13e92513c562f0dc.yaml
deleted file mode 100644
index d9c77242c..000000000
--- a/releasenotes/notes/add-certificate-ekus-13e92513c562f0dc.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixes `bug 1668775 `__ Certmonger certificate does not include EKUs
-
diff --git a/releasenotes/notes/add-gnocchi-event-dispatcher-d70df046292e333e.yaml b/releasenotes/notes/add-gnocchi-event-dispatcher-d70df046292e333e.yaml
deleted file mode 100644
index 8b5c7dfbe..000000000
--- a/releasenotes/notes/add-gnocchi-event-dispatcher-d70df046292e333e.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Add gnocchi to the events dispatcher so ceilometer can
-    publish events to panko and gnocchi.
diff --git a/releasenotes/notes/add-os-auth-type-5ed9338e73e0e172.yaml b/releasenotes/notes/add-os-auth-type-5ed9338e73e0e172.yaml
deleted file mode 100644
index 5f25ced5c..000000000
--- a/releasenotes/notes/add-os-auth-type-5ed9338e73e0e172.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Add OS_AUTH_TYPE to the undercloud stackrc file. Not all clients default
-    to keystone auth, so let's explicitly set the auth type in the env.
diff --git a/releasenotes/notes/add-variables-for-ironic-inspector-proxy-5fd349c75e3c054a.yaml b/releasenotes/notes/add-variables-for-ironic-inspector-proxy-5fd349c75e3c054a.yaml
deleted file mode 100644
index 367c8137b..000000000
--- a/releasenotes/notes/add-variables-for-ironic-inspector-proxy-5fd349c75e3c054a.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Add tripleo::ui::endpoint_proxy_ironic_inspector and
-    tripleo::ui::endpoint_config_ironic_inspector variables to elements for
-    use in the new proxy config for the ironic-inspector API service
diff --git a/releasenotes/notes/ansible-deploy-a257e06fddb7001f.yaml b/releasenotes/notes/ansible-deploy-a257e06fddb7001f.yaml
deleted file mode 100644
index 4c03ff312..000000000
--- a/releasenotes/notes/ansible-deploy-a257e06fddb7001f.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-features:
-  - |
-    The ``ansible`` deploy interface is enabled by default. 
It can be used by
-    updating a node with the following command::
-
-      openstack baremetal node set --deploy-interface ansible \
-          --driver-info ansible_deploy_username=<username> \
-          --driver-info ansible_deploy_key_file=<private-key-file>
diff --git a/releasenotes/notes/change-cert-precedence-e1926868d137aa1d.yaml b/releasenotes/notes/change-cert-precedence-e1926868d137aa1d.yaml
deleted file mode 100644
index 91df60ae9..000000000
--- a/releasenotes/notes/change-cert-precedence-e1926868d137aa1d.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    The user-provided certificate (via the undercloud_service_certificate
-    option) now takes precedence over the autogenerated certificate (which is
-    created via the generate_service_certificate option)
diff --git a/releasenotes/notes/change-default-nonssl-undercloud-ports-34e60f87f3eb7ad6.yaml b/releasenotes/notes/change-default-nonssl-undercloud-ports-34e60f87f3eb7ad6.yaml
deleted file mode 100644
index b8c222127..000000000
--- a/releasenotes/notes/change-default-nonssl-undercloud-ports-34e60f87f3eb7ad6.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-upgrade:
-  - |
-    Changed the configuration of endpoints that the UI uses in order to
-    connect to the Undercloud in a non-SSL deployment. The port number that
-    the UI now uses to communicate with the Undercloud for non-SSL
-    connections is 3000, which supports endpoint proxy configuration.
-    Previously, this port number was the default port number for the service
-    endpoint that the UI connected to.
-fixes:
-  - |
-    Fixes `bug 1663199 `__ UI doesn't work without manual update on HTTP undercloud
-
diff --git a/releasenotes/notes/configurable-clients-endpoint_type-fc658f7ae935133f.yaml b/releasenotes/notes/configurable-clients-endpoint_type-fc658f7ae935133f.yaml
deleted file mode 100644
index dc29d8e55..000000000
--- a/releasenotes/notes/configurable-clients-endpoint_type-fc658f7ae935133f.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - |
-    In /etc/heat/heat.conf, [clients]/endpoint_type was configured to use the
-    internal endpoints and this was hardcoded in puppet-stack-config.pp so
-    there was no way to change it. It's now configurable via the hiera key
-    heat_clients_endpoint_type.
diff --git a/releasenotes/notes/create-heat-cfn-endpoint-c7c00e3b61a98b5e.yaml b/releasenotes/notes/create-heat-cfn-endpoint-c7c00e3b61a98b5e.yaml
deleted file mode 100644
index 0db96b367..000000000
--- a/releasenotes/notes/create-heat-cfn-endpoint-c7c00e3b61a98b5e.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    The Heat CFN endpoint is now created in Keystone during the undercloud
-    install. A new configuration option, undercloud_heat_cfn_password, is
-    added for the heat_cfn service user associated with the endpoint.
diff --git a/releasenotes/notes/deprecate-instack-virt-setup-0e76669d1e068408.yaml b/releasenotes/notes/deprecate-instack-virt-setup-0e76669d1e068408.yaml
deleted file mode 100644
index f34f2b436..000000000
--- a/releasenotes/notes/deprecate-instack-virt-setup-0e76669d1e068408.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-deprecations:
-  - The instack-virt-setup script has been deprecated.
diff --git a/releasenotes/notes/deprecate_auth_uri_parameter-545d2c29c84c7c64.yaml b/releasenotes/notes/deprecate_auth_uri_parameter-545d2c29c84c7c64.yaml
deleted file mode 100644
index 6001b1c2c..000000000
--- a/releasenotes/notes/deprecate_auth_uri_parameter-545d2c29c84c7c64.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-deprecations:
-  - auth_uri is deprecated and will be removed in a future release.
-    Please use www_authenticate_uri instead.
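The auth_uri deprecation above is a mechanical rename within [keystone_authtoken] sections. A minimal sketch of that rename, assuming a hypothetical service config path; this is illustrative, not how the undercloud actually applies it::

    # Copy a deprecated auth_uri value to www_authenticate_uri.
    # '/etc/mistral/mistral.conf' is only an example path.
    from six.moves import configparser

    path = '/etc/mistral/mistral.conf'
    conf = configparser.ConfigParser()
    conf.read(path)
    section = 'keystone_authtoken'
    if (conf.has_section(section) and
            conf.has_option(section, 'auth_uri') and
            not conf.has_option(section, 'www_authenticate_uri')):
        conf.set(section, 'www_authenticate_uri',
                 conf.get(section, 'auth_uri'))
        conf.remove_option(section, 'auth_uri')
        with open(path, 'w') as f:
            conf.write(f)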
diff --git a/releasenotes/notes/deprecate_instack-083e17fb09f07fa0.yaml b/releasenotes/notes/deprecate_instack-083e17fb09f07fa0.yaml
deleted file mode 100644
index 141b0661b..000000000
--- a/releasenotes/notes/deprecate_instack-083e17fb09f07fa0.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
-  - |
-    instack-undercloud is deprecated in the Rocky cycle and is replaced by
-    the containerized undercloud efforts in python-tripleoclient.
diff --git a/releasenotes/notes/disable-ceilometer-api-14b270afc22d75c1.yaml b/releasenotes/notes/disable-ceilometer-api-14b270afc22d75c1.yaml
deleted file mode 100644
index 56e8a8fb8..000000000
--- a/releasenotes/notes/disable-ceilometer-api-14b270afc22d75c1.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-deprecations:
-  - Ceilometer API is deprecated since the Ocata release.
-fixes:
-  - Ceilometer API is now disabled by default. This has been deprecated
-    since the Ocata release. Use gnocchi/aodh and panko APIs instead.
diff --git a/releasenotes/notes/disable-ceilometer-collector-64bbcbe58b122721.yaml b/releasenotes/notes/disable-ceilometer-collector-64bbcbe58b122721.yaml
deleted file mode 100644
index e21eabe65..000000000
--- a/releasenotes/notes/disable-ceilometer-collector-64bbcbe58b122721.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-deprecations:
-  - Ceilometer collector service is deprecated in the Pike release.
-fixes:
-  - Disable ceilometer collector by default as it's deprecated. All the
-    data will now be dispatched through the pipeline directly.
diff --git a/releasenotes/notes/disable-telemetry-by-default-d596b78fc08df1a9.yaml b/releasenotes/notes/disable-telemetry-by-default-d596b78fc08df1a9.yaml
deleted file mode 100644
index b5ac7cf0f..000000000
--- a/releasenotes/notes/disable-telemetry-by-default-d596b78fc08df1a9.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-upgrade:
-  - If you had telemetry enabled in Ocata and you upgrade to Pike with
-    defaults, the telemetry services will be disabled upon upgrade. If
-    you choose to keep it enabled, set the enable_telemetry option to
-    true before upgrade and services will continue to be enabled after upgrade.
-fixes:
-  - Finally disabling telemetry services on the undercloud by default. The
-    telemetry use case has been quite limited on the undercloud, so it makes
-    sense to disable it by default and let the user enable it based on need.
diff --git a/releasenotes/notes/disallow-ip-changes-bde0e2528544c71b.yaml b/releasenotes/notes/disallow-ip-changes-bde0e2528544c71b.yaml
deleted file mode 100644
index e6b28341e..000000000
--- a/releasenotes/notes/disallow-ip-changes-bde0e2528544c71b.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-upgrade:
-  - |
-    Network configuration changes are no longer allowed during undercloud
-    upgrades. Changing the local_ip of a deployed undercloud causes problems
-    with some of the services, so a pre-deployment check was added to prevent
-    such changes.
-
-    Because the default CIDR was changed in this release, the check also
-    prevents accidental reconfiguration of the ctlplane network if the old
-    default is still in use, but not explicitly configured.
diff --git a/releasenotes/notes/dnsmask-pxe-filter-start-stop-commands-c1f71c6f38f27c78.yaml b/releasenotes/notes/dnsmask-pxe-filter-start-stop-commands-c1f71c6f38f27c78.yaml
deleted file mode 100644
index 70d27a7d0..000000000
--- a/releasenotes/notes/dnsmask-pxe-filter-start-stop-commands-c1f71c6f38f27c78.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Fixes an issue where the PXE filter in ironic-inspector's DHCP server may
-    become out of sync with the ironic-inspector service. 
`Bug 1780421
-    `_.
diff --git a/releasenotes/notes/docker_registry_mirror-41c5a17eec6133f2.yaml b/releasenotes/notes/docker_registry_mirror-41c5a17eec6133f2.yaml
deleted file mode 100644
index 2a948a36f..000000000
--- a/releasenotes/notes/docker_registry_mirror-41c5a17eec6133f2.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Add a new docker_registry_mirror option which can be used to
-    configure a registry mirror in the /etc/docker/daemon.json file.
-    The motivation for this change is to help support pulling images
-    from HTTP mirrors within CI.
diff --git a/releasenotes/notes/domain_params-63ef884e1ee154c0.yaml b/releasenotes/notes/domain_params-63ef884e1ee154c0.yaml
deleted file mode 100644
index 1c713653d..000000000
--- a/releasenotes/notes/domain_params-63ef884e1ee154c0.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-issues:
-  - |
-    Keystone v2.0 APIs were removed, so we now need to configure
-    `project_domain_name` and `user_domain_name` to enable the v3 API.
-    We're using the Default domain since it was already in use.
diff --git a/releasenotes/notes/drop-ceilometer-collector-c47d9aa6d47efedf.yaml b/releasenotes/notes/drop-ceilometer-collector-c47d9aa6d47efedf.yaml
deleted file mode 100644
index fbca53970..000000000
--- a/releasenotes/notes/drop-ceilometer-collector-c47d9aa6d47efedf.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Drop ceilometer collector from the undercloud. This was moved into legacy
-    mode in Pike and deprecated.
diff --git a/releasenotes/notes/drop-legacy-ceilometer-api-b4b3540353f24905.yaml b/releasenotes/notes/drop-legacy-ceilometer-api-b4b3540353f24905.yaml
deleted file mode 100644
index c03d6aa2c..000000000
--- a/releasenotes/notes/drop-legacy-ceilometer-api-b4b3540353f24905.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Remove the legacy ceilometer api from the undercloud. This was moved to
-    legacy mode in Pike.
diff --git a/releasenotes/notes/enable-cinder-description-c53f8dc3a61e27c1.yaml b/releasenotes/notes/enable-cinder-description-c53f8dc3a61e27c1.yaml
deleted file mode 100644
index ce4798356..000000000
--- a/releasenotes/notes/enable-cinder-description-c53f8dc3a61e27c1.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    The description of the ``enable_cinder`` option was fixed to not imply
-    that booting from Cinder volumes is implemented in the undercloud.
diff --git a/releasenotes/notes/fix-mistral-error-handling-58010a43f53ded5e.yaml b/releasenotes/notes/fix-mistral-error-handling-58010a43f53ded5e.yaml
deleted file mode 100644
index 8e50ad30f..000000000
--- a/releasenotes/notes/fix-mistral-error-handling-58010a43f53ded5e.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Fixed an incompatibility with mistralclient 3.2.0, where a different
-    exception type was raised and thus not handled during the undercloud
-    install post config. See #1749186
diff --git a/releasenotes/notes/fix-panko-ssl-port-d812becf99525a9a.yaml b/releasenotes/notes/fix-panko-ssl-port-d812becf99525a9a.yaml
deleted file mode 100644
index 3eace8691..000000000
--- a/releasenotes/notes/fix-panko-ssl-port-d812becf99525a9a.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - |
-    Fix the panko ssl port to match the puppet-tripleo haproxy resource. 
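The domain_params note above boils down to passing the two domain options with every Keystone v3 authentication. A short sketch with keystoneauth1 (already in lower-constraints.txt); every credential value here is a placeholder::

    from keystoneauth1 import session
    from keystoneauth1.identity import v3

    auth = v3.Password(auth_url='http://192.168.24.1:5000',
                       username='admin',
                       password='placeholder',
                       project_name='admin',
                       # The two options the note describes; the Default
                       # domain is used because it already existed.
                       user_domain_name='Default',
                       project_domain_name='Default')
    sess = session.Session(auth=auth)
    print(sess.get_token())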
diff --git a/releasenotes/notes/fix_ntp_configuration-1a74dd4e02a622f5.yaml b/releasenotes/notes/fix_ntp_configuration-1a74dd4e02a622f5.yaml
deleted file mode 100644
index 848bf8378..000000000
--- a/releasenotes/notes/fix_ntp_configuration-1a74dd4e02a622f5.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - Add an UNDERCLOUD_NTP_SERVERS configuration in undercloud.conf
-fixes:
-  - Fixes `bug 1664537 `__ undercloud ntp configuration.
diff --git a/releasenotes/notes/heat-convergence-fea9886b21ff02a5.yaml b/releasenotes/notes/heat-convergence-fea9886b21ff02a5.yaml
deleted file mode 100644
index 10cf1c6e6..000000000
--- a/releasenotes/notes/heat-convergence-fea9886b21ff02a5.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - |
-    In this release the Heat convergence engine has been enabled on the
-    undercloud, which allows multiple stack updates to be run at the same time.
-upgrade:
-  - |
-    On upgrade to this version, any existing overcloud stacks will be converted
-    to use the Heat convergence engine. The only user-visible impact of this
-    should be the ability to use Heat convergence features.
diff --git a/releasenotes/notes/heat-dbsync-timeout-4301fb54b8711df5.yaml b/releasenotes/notes/heat-dbsync-timeout-4301fb54b8711df5.yaml
deleted file mode 100644
index 52b0ea337..000000000
--- a/releasenotes/notes/heat-dbsync-timeout-4301fb54b8711df5.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    Increasing the heat db-sync timeout from 5 to 15 minutes.
-    During an undercloud upgrade, the database can be very big and the
-    dbsync needs at least 10 minutes to run. So we override the Puppet
-    default value of 5 minutes to have a timeout of 15 minutes for
-    production deployments.
diff --git a/releasenotes/notes/heat-over-httpd-ae66469c8390b626.yaml b/releasenotes/notes/heat-over-httpd-ae66469c8390b626.yaml
deleted file mode 100644
index c94506580..000000000
--- a/releasenotes/notes/heat-over-httpd-ae66469c8390b626.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Heat APIs (API, CFN and Cloudwatch) now run over httpd in the undercloud.
diff --git a/releasenotes/notes/hw-types-ded17c6d920e1feb.yaml b/releasenotes/notes/hw-types-ded17c6d920e1feb.yaml
deleted file mode 100644
index 72314f65c..000000000
--- a/releasenotes/notes/hw-types-ded17c6d920e1feb.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-features:
-  - |
-    Allow configuring enabled hardware types via the new
-    ``enabled_hardware_types`` configuration option. See `the driver
-    composition reform spec
-    `_
-    for details. Enabled management and power interfaces are derived for
-    known hardware types. Inspection via **ironic-inspector** and
-    ``socat``-based serial console support is enabled by default.
-  - |
-    Support Redfish-compatible hardware via the ``redfish`` hardware type.
diff --git a/releasenotes/notes/include-swap-in-memory-check-fe378284f06aae1a.yaml b/releasenotes/notes/include-swap-in-memory-check-fe378284f06aae1a.yaml
deleted file mode 100644
index a12d261d8..000000000
--- a/releasenotes/notes/include-swap-in-memory-check-fe378284f06aae1a.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-other:
-  - |
-    Swap memory is now included in the minimum memory check. While relying on
-    swap is still not recommended for production deployments, it is not
-    uncommon for developers to use SSD-backed swap to fit more instances into
-    a system with limited memory.
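A rough sketch of the kind of check the swap note above describes, counting swap toward the minimum (psutil is in lower-constraints.txt; the 8 GB threshold matches the requirement increase noted further below, and the helper name is hypothetical)::

    import psutil

    MINIMUM_GB = 8

    def check_memory():
        # Count physical RAM plus swap, as the note describes.
        total = psutil.virtual_memory().total + psutil.swap_memory().total
        total_gb = total / 1024.0 ** 3
        if total_gb < MINIMUM_GB:
            raise RuntimeError('At least %d GB of RAM+swap is required, '
                               'found %.1f GB' % (MINIMUM_GB, total_gb))

    check_memory()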
diff --git a/releasenotes/notes/insecure_registries-58ffd10f75112b31.yaml b/releasenotes/notes/insecure_registries-58ffd10f75112b31.yaml
deleted file mode 100644
index f111b559f..000000000
--- a/releasenotes/notes/insecure_registries-58ffd10f75112b31.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - |
-    Introduce docker_insecure_registries, which is an array of host/port
-    combinations of docker insecure registries. The default value matches
-    the previously hardcoded entries, but now we can easily
-    override it in undercloud.conf.
diff --git a/releasenotes/notes/inspector-additional-hooks-9a5c8f5aad2bac31.yaml b/releasenotes/notes/inspector-additional-hooks-9a5c8f5aad2bac31.yaml
deleted file mode 100644
index f25eae388..000000000
--- a/releasenotes/notes/inspector-additional-hooks-9a5c8f5aad2bac31.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Add new plugins for lldp processing (``lldp_basic``) and switch
-    port link information (``local_link_connection``) to
-    ``processing_hooks`` in inspector.conf.
diff --git a/releasenotes/notes/inspector-boot-mode-3c651f40d95abb46.yaml b/releasenotes/notes/inspector-boot-mode-3c651f40d95abb46.yaml
deleted file mode 100644
index 30d0d909e..000000000
--- a/releasenotes/notes/inspector-boot-mode-3c651f40d95abb46.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-features:
-  - |
-    Introspection now detects and properly sets the boot mode (BIOS or UEFI)
-    for ironic nodes.
-upgrade:
-  - |
-    The boot mode (BIOS or UEFI) is now detected on introspection and stored
-    on nodes as part of the ``boot_mode`` capability. This has two
-    consequences:
-
-    * If you change the actual boot mode via the hardware management
-      interface, you have to either re-run introspection or update it
-      manually.
-
-    * If you set the **expected** boot mode on Ironic nodes manually (for
-      drivers that support it, e.g. ``pxe_ilo``), you have to double-check it
-      after every introspection run and fix it if necessary.
diff --git a/releasenotes/notes/inspector-mysql-0985b0bc920c8b34.yaml b/releasenotes/notes/inspector-mysql-0985b0bc920c8b34.yaml
deleted file mode 100644
index ed003f396..000000000
--- a/releasenotes/notes/inspector-mysql-0985b0bc920c8b34.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-upgrade:
-  - |
-    During the upgrade to Ocata, ironic-inspector is switched from a local
-    SQLite database to the same MySQL/MariaDB that all other services are
-    using. Please make sure that no introspections are in progress during
-    the upgrade. Please re-create introspection rules after the upgrade.
-    This change does not affect the ability to retrieve introspection data
-    from introspection runs before the upgrade.
diff --git a/releasenotes/notes/ipmi-cred-7d3b52a2618b66f7.yaml b/releasenotes/notes/ipmi-cred-7d3b52a2618b66f7.yaml
deleted file mode 100644
index a995555de..000000000
--- a/releasenotes/notes/ipmi-cred-7d3b52a2618b66f7.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - |
-    No longer set the deprecated ``enable_setting_ipmi_credentials``
-    ironic-inspector option to ``true``. Deployers still needing it should
-    set it explicitly via a hieradata override.
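The docker_insecure_registries note above ultimately renders a list into the docker daemon configuration. A hedged illustration (the registry addresses are placeholders; ``insecure-registries`` is the dockerd configuration key)::

    import json

    docker_insecure_registries = ['192.168.24.1:8787', '192.168.24.3:8787']

    # Writing /etc/docker/daemon.json requires root; the path is shown
    # for illustration only.
    with open('/etc/docker/daemon.json', 'w') as f:
        json.dump({'insecure-registries': docker_insecure_registries}, f,
                  indent=2)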
diff --git a/releasenotes/notes/ipv6-disabled-sysctl-settings-7120b2af4d72b8ad.yaml b/releasenotes/notes/ipv6-disabled-sysctl-settings-7120b2af4d72b8ad.yaml
deleted file mode 100644
index f2166d25b..000000000
--- a/releasenotes/notes/ipv6-disabled-sysctl-settings-7120b2af4d72b8ad.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
  - The undercloud installer now checks if IPv6 is enabled before applying
-    IPv6-specific sysctl settings that are only available when IPv6 is not
-    disabled. (Fixes `bug 1675917 `__)
-
diff --git a/releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml b/releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml
deleted file mode 100644
index b7245b19f..000000000
--- a/releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-fixes:
-  - |
-    The default ``IRONIC_API_VERSION`` in ``stackrc`` is now set to the same
-    value as ``OS_BAREMETAL_API_VERSION`` for consistency between the two
-    clients.
-other:
-  - |
-    The default ``OS_BAREMETAL_API_VERSION`` in ``stackrc`` was bumped to 1.29,
-    which corresponds to Ocata final and allows using all recent features
-    without specifying an explicit version.
diff --git a/releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml b/releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml
deleted file mode 100644
index f0ed278dd..000000000
--- a/releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - |
-    The default bare metal API version used by the undercloud was bumped
-    to 1.34, which is the latest API version supported by the Pike
-    ironicclient.
diff --git a/releasenotes/notes/ironic-dbsync-da5d047e92841f78.yaml b/releasenotes/notes/ironic-dbsync-da5d047e92841f78.yaml
deleted file mode 100644
index 5cee8904b..000000000
--- a/releasenotes/notes/ironic-dbsync-da5d047e92841f78.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-upgrade:
-  - |
-    The undercloud upgrade will handle the change of ownership for
-    ironic-dbsync.log to become ironic:ironic instead of root:root.
-    https://review.openstack.org/#/c/457478/ broke TripleO upgrades, but
-    it fixes a valid issue in the puppet-ironic module. We still want to
-    handle upgrades for existing deployments, which is why we manage the
-    ownership change in instack-undercloud.
diff --git a/releasenotes/notes/ironic-inspector-use-pxe-filter-dnsmasq-611a69bc12011989.yaml b/releasenotes/notes/ironic-inspector-use-pxe-filter-dnsmasq-611a69bc12011989.yaml
deleted file mode 100644
index aecf6ecfc..000000000
--- a/releasenotes/notes/ironic-inspector-use-pxe-filter-dnsmasq-611a69bc12011989.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Using the ``dnsmasq`` PXE filter for inspection fixes bug #1756075.
diff --git a/releasenotes/notes/ironic-ssh-removal-72982955d848dfb3.yaml b/releasenotes/notes/ironic-ssh-removal-72982955d848dfb3.yaml
deleted file mode 100644
index 206ed1214..000000000
--- a/releasenotes/notes/ironic-ssh-removal-72982955d848dfb3.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-upgrade:
-  - |
-    Out-of-box support for Ironic ``*_ssh`` drivers was removed. These drivers
-    were deprecated in the Newton release.
diff --git a/releasenotes/notes/keystone_authtoken-44befee30afcc206.yaml b/releasenotes/notes/keystone_authtoken-44befee30afcc206.yaml
deleted file mode 100644
index c91f5dfae..000000000
--- a/releasenotes/notes/keystone_authtoken-44befee30afcc206.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Mistral is now deployed with Keystone v3 options (authtoken). 
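The IPv6 sysctl fix above amounts to a guard of the following shape: only apply IPv6-specific settings when the kernel exposes IPv6 and it is not administratively disabled. This is an illustrative sketch, not the installer's actual code::

    import os

    def ipv6_enabled():
        path = '/proc/sys/net/ipv6/conf/all/disable_ipv6'
        if not os.path.exists(path):
            return False  # kernel built without IPv6 support
        with open(path) as f:
            return f.read().strip() == '0'

    if ipv6_enabled():
        print('IPv6 enabled, safe to apply IPv6-specific sysctl settings')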
diff --git a/releasenotes/notes/keystonev3-4442d170d02d8dad.yaml b/releasenotes/notes/keystonev3-4442d170d02d8dad.yaml
deleted file mode 100644
index 4a35b2c5f..000000000
--- a/releasenotes/notes/keystonev3-4442d170d02d8dad.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Update Keystone endpoints to be versionless, so the v3 API can be used
-    by services that use the service catalog in Keystone.
diff --git a/releasenotes/notes/logging-cron-291af6500bf05143.yaml b/releasenotes/notes/logging-cron-291af6500bf05143.yaml
deleted file mode 100644
index 175e5fae5..000000000
--- a/releasenotes/notes/logging-cron-291af6500bf05143.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Implements `websocket-logging `__.
-    Adds an hourly cron trigger for tripleo-ui logging.
diff --git a/releasenotes/notes/maintain-member-role-ecc556d81ce583a1.yaml b/releasenotes/notes/maintain-member-role-ecc556d81ce583a1.yaml
deleted file mode 100644
index 4c9aea2e8..000000000
--- a/releasenotes/notes/maintain-member-role-ecc556d81ce583a1.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-upgrade:
-  - |
-    The _member_ role (if it exists) on the admin user will now be retained
-    automatically during undercloud upgrades. This functionality was
-    originally added to work around an issue with upgrading very old versions
-    of TripleO, but was broken by changes to the upgrade process. It will
-    no longer be necessary to manually add the _member_ role to the admin user
-    after upgrading an affected deployment.
diff --git a/releasenotes/notes/memcached_hardening-3d6984c9b6e5f3f3.yaml b/releasenotes/notes/memcached_hardening-3d6984c9b6e5f3f3.yaml
deleted file mode 100644
index 8256af22a..000000000
--- a/releasenotes/notes/memcached_hardening-3d6984c9b6e5f3f3.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-security:
-  - |
-    Restrict memcached service to TCP and localhost network (CVE-2018-1000115).
diff --git a/releasenotes/notes/migrate-plans-36bdf9a667ce02d5.yaml b/releasenotes/notes/migrate-plans-36bdf9a667ce02d5.yaml
deleted file mode 100644
index b72b473a1..000000000
--- a/releasenotes/notes/migrate-plans-36bdf9a667ce02d5.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-upgrade:
-  - Deployment plan environments for existing plans will be migrated from
-    Mistral to Swift on undercloud upgrade.
diff --git a/releasenotes/notes/migrate-to-hardware-types-df0b6a3bd0f818fc.yaml b/releasenotes/notes/migrate-to-hardware-types-df0b6a3bd0f818fc.yaml
deleted file mode 100644
index 69ef19eff..000000000
--- a/releasenotes/notes/migrate-to-hardware-types-df0b6a3bd0f818fc.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-upgrade:
-  - |
-    During an upgrade to this release, a migration of all nodes from classic
-    drivers to hardware types will be attempted. For some nodes it may result
-    in non-supported optional interfaces (like "agent" RAID or shellinabox
-    console) being reset to their no-op implementations (like "no-raid" RAID
-    or "no-console" console). Nodes that cannot be upgraded will be skipped.
-    Manual upgrade will be required, since the classic drivers may be removed
-    in the Rocky release.
diff --git a/releasenotes/notes/mistral_cron_trigger_subsystem_interval-fdacb60599948e91.yaml b/releasenotes/notes/mistral_cron_trigger_subsystem_interval-fdacb60599948e91.yaml
deleted file mode 100644
index 3d8fc018c..000000000
--- a/releasenotes/notes/mistral_cron_trigger_subsystem_interval-fdacb60599948e91.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - |
-    If you had cron triggers for Mistral in the undercloud, they will now only
-    execute at most every 10 minutes. 
Previously they could run as frequently
-    as every second.
diff --git a/releasenotes/notes/mysql-timeout-ec1444c45da24a1e.yaml b/releasenotes/notes/mysql-timeout-ec1444c45da24a1e.yaml
deleted file mode 100644
index 4e27d908d..000000000
--- a/releasenotes/notes/mysql-timeout-ec1444c45da24a1e.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Set the connect_timeout to 60s for mysql connections. This helps fix an
-    issue where undercloud services lose the mysql connection if it takes
-    more than 10s to complete (e.g. under high load).
diff --git a/releasenotes/notes/networking-baremetal-ml2-4b50d6bab617c00c.yaml b/releasenotes/notes/networking-baremetal-ml2-4b50d6bab617c00c.yaml
deleted file mode 100644
index aadb857e6..000000000
--- a/releasenotes/notes/networking-baremetal-ml2-4b50d6bab617c00c.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - The undercloud now uses the ``baremetal`` neutron ML2 mechanism driver, and
-    the l2 agent ironic-neutron-agent. This makes it possible to use
-    neutron routed networks in the undercloud.
diff --git a/releasenotes/notes/node-discovery-8264e0c97cb5e00f.yaml b/releasenotes/notes/node-discovery-8264e0c97cb5e00f.yaml
deleted file mode 100644
index 51789a6bd..000000000
--- a/releasenotes/notes/node-discovery-8264e0c97cb5e00f.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-features:
-  - |
-    Allow enabling auto-discovery of ironic nodes by setting the new option
-    ``enable_node_discovery=True`` in the ``undercloud.conf``. When enabled,
-    it adds unknown nodes that boot the introspection ramdisk to ironic in
-    the ``enroll`` provisioning state, with the driver set to the value of
-    the ``discovery_default_driver`` configuration option (``pxe_ipmitool``
-    by default). See the ironic-inspector documentation for more details:
-    https://docs.openstack.org/ironic-inspector/latest/user/usage.html#discovery.
diff --git a/releasenotes/notes/nova-cert-9a8bbad1d51c0928.yaml b/releasenotes/notes/nova-cert-9a8bbad1d51c0928.yaml
deleted file mode 100644
index e452f8b14..000000000
--- a/releasenotes/notes/nova-cert-9a8bbad1d51c0928.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - Removing the Nova cert service, which was removed in Nova
-    during the Pike cycle.
diff --git a/releasenotes/notes/nova_cells_setup-471df6c9dd45166c.yaml b/releasenotes/notes/nova_cells_setup-471df6c9dd45166c.yaml
deleted file mode 100644
index 79439b2df..000000000
--- a/releasenotes/notes/nova_cells_setup-471df6c9dd45166c.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-features:
-  - Configure the basic cells setup for Nova, now required in Ocata.
diff --git a/releasenotes/notes/nova_db-677f60f74ba34df9.yaml b/releasenotes/notes/nova_db-677f60f74ba34df9.yaml
deleted file mode 100644
index 13ed4fb2f..000000000
--- a/releasenotes/notes/nova_db-677f60f74ba34df9.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-other:
-  - |
-    Increase the sync timeout for nova db syncs.
-    We have seen on lower quality hardware that the nova db syncs can take
-    an excessive amount of time. In order to still support deploying on this
-    hardware, we now increase the timeout from the default 300 seconds to 900
-    seconds to allow for this less performant gear.
-    This value should never be bumped to more than 900. If we ever happen to
-    hit such timeouts again, we'll have to investigate and fix the root cause.
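For the mysql connect_timeout fix above, here is one way such a timeout can be expressed client-side with SQLAlchemy and PyMySQL. This is a sketch under placeholder credentials, not how puppet configures the undercloud services::

    from sqlalchemy import create_engine

    # connect_args is passed straight through to the DBAPI driver;
    # user, password, host and database below are placeholders.
    engine = create_engine(
        'mysql+pymysql://nova:placeholder@192.168.24.1/nova',
        connect_args={'connect_timeout': 60})
    print(engine.url)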
diff --git a/releasenotes/notes/nova_eventlet-84ad971618732da9.yaml b/releasenotes/notes/nova_eventlet-84ad971618732da9.yaml
deleted file mode 100644
index fd95e51b2..000000000
--- a/releasenotes/notes/nova_eventlet-84ad971618732da9.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-issues:
-  - Deploy Nova API in eventlet instead of WSGI (the deployment model
-    suggested by the Nova team). Running it over WSGI was causing some
-    issues that we didn't catch until now.
-    Related to `bug 1661360
-    `__.
diff --git a/releasenotes/notes/numa-topology-collector-8b000ce29863eecf.yaml b/releasenotes/notes/numa-topology-collector-8b000ce29863eecf.yaml
deleted file mode 100644
index b3be620a0..000000000
--- a/releasenotes/notes/numa-topology-collector-8b000ce29863eecf.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-features:
-  - |
-    Add the 'numa-topology' collector to 'ipa-inspection-collectors' if
-    'inspection_extras' is true. The 'numa-topology' collector will fetch the
-    details about the memory, CPUs and NICs associated with each NUMA node
-    during introspection. These details will be necessary in deriving the
-    deployment parameters for NFV use cases.
diff --git a/releasenotes/notes/ovirt-driver-caa85e9a99ba1aef.yaml b/releasenotes/notes/ovirt-driver-caa85e9a99ba1aef.yaml
deleted file mode 100644
index 8cbcd66db..000000000
--- a/releasenotes/notes/ovirt-driver-caa85e9a99ba1aef.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    Adds support for using oVirt with Ironic via the new ``staging-ovirt``
-    hardware type. The hardware type is not enabled by default and requires
-    installing the ``ovirt-engine-sdk-python`` package.
diff --git a/releasenotes/notes/relax-validation-for-ui-f27a5e9b64d1d6c1.yaml b/releasenotes/notes/relax-validation-for-ui-f27a5e9b64d1d6c1.yaml
deleted file mode 100644
index ba5cfbca8..000000000
--- a/releasenotes/notes/relax-validation-for-ui-f27a5e9b64d1d6c1.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-fixes:
-  - |
-    Previously, when an IP value was provided for the undercloud_public_host
-    or undercloud_admin_host config value, it was validated to ensure it fell
-    within the network_cidr. This was to avoid problems when the CIDR was
-    changed but the IPs were not. However, this validation was broken for a
-    time in the case where generate_service_certificate was used. During this
-    time, the UI began to depend on the broken validation as it needs to
-    listen on a routable network, which the provisioning network often is not.
-    When the validation was fixed, the user was no longer able to configure
-    the host values to listen on a different routable network.
-
-    To enable this UI functionality again, the host validation has been
-    disabled when enable_ui is true. This means the user is responsible for
-    selecting functional host values, but the UI can once again be configured
-    to listen on a separate network.
diff --git a/releasenotes/notes/remove-image_path-configuration-9092b1c78da4d6de.yaml b/releasenotes/notes/remove-image_path-configuration-9092b1c78da4d6de.yaml
deleted file mode 100644
index 6d70b849c..000000000
--- a/releasenotes/notes/remove-image_path-configuration-9092b1c78da4d6de.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-other:
-  - |
-    The image_path configuration option does nothing and has been removed.
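The relax-validation note above concerns a check of the following shape: does a configured host value fall inside the provisioning CIDR? A minimal sketch mirroring the netaddr usage in validator.py earlier in this patch::

    import netaddr

    def host_in_cidr(host, cidr):
        try:
            return netaddr.IPAddress(host) in netaddr.IPNetwork(cidr)
        except netaddr.core.AddrFormatError:
            # Non-IP values (hostnames) cannot be checked against the CIDR.
            return True

    print(host_in_cidr('192.168.24.2', '192.168.24.0/24'))  # True
    print(host_in_cidr('10.0.0.10', '192.168.24.0/24'))     # False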
diff --git a/releasenotes/notes/remove-leftover-tuskar-packages-eeba9cf583a11ee3.yaml b/releasenotes/notes/remove-leftover-tuskar-packages-eeba9cf583a11ee3.yaml
deleted file mode 100644
index 82115bf75..000000000
--- a/releasenotes/notes/remove-leftover-tuskar-packages-eeba9cf583a11ee3.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-upgrade:
-  - During the upgrade make sure to remove any left-over tuskar packages. These
-    have been known to cause problems during the upgrade. See bug 1691744
-    for more information.
-fixes:
-  - Closes bug https://bugs.launchpad.net/tripleo/+bug/1691744 which is
-    caused by left-over tuskar packages. This would only affect environments
-    deployed with TripleO since Kilo and for which tuskar has not explicitly
-    been removed.
diff --git a/releasenotes/notes/required-memory-increase-b7f22375c1d21aee.yaml b/releasenotes/notes/required-memory-increase-b7f22375c1d21aee.yaml
deleted file mode 100644
index 6d6442cf8..000000000
--- a/releasenotes/notes/required-memory-increase-b7f22375c1d21aee.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-upgrade:
-  - |
-    The required memory for an undercloud install has been increased from 4 GB
-    to 8 GB. Note that this is an absolute minimum. More memory is
-    recommended for production installs.
diff --git a/releasenotes/notes/resource-class-init-e11b6a630bc47bed.yaml b/releasenotes/notes/resource-class-init-e11b6a630bc47bed.yaml
deleted file mode 100644
index 2e34c7d42..000000000
--- a/releasenotes/notes/resource-class-init-e11b6a630bc47bed.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-upgrade:
-  - |
-    This release replaces node scheduling based on properties (CPU count,
-    memory and disk) with scheduling based on *custom resource classes*.
-    As part of this change during the upgrade:
-
-    * The ``resource_class`` field is set to ``baremetal``, if empty.
-    * The standard flavors are adjusted to request one instance of the
-      ``baremetal`` resource class and to **not** request the standard
-      properties. Flavors that already have a resource class attached are
-      not changed.
-
-    All non-standard custom flavors have to be changed in a similar way.
-
-    See the `ironic flavor documentation
-    `_
-    for details.
diff --git a/releasenotes/notes/restart-collector-b043489fcdf1e9c7.yaml b/releasenotes/notes/restart-collector-b043489fcdf1e9c7.yaml
deleted file mode 100644
index 1a3a29751..000000000
--- a/releasenotes/notes/restart-collector-b043489fcdf1e9c7.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Add a dependency to restart the collector after other services are
-    up and the ceilometer upgrade is complete.
diff --git a/releasenotes/notes/routed-subnets-undercloud-64bb87222db0555b.yaml b/releasenotes/notes/routed-subnets-undercloud-64bb87222db0555b.yaml
deleted file mode 100644
index 52b529608..000000000
--- a/releasenotes/notes/routed-subnets-undercloud-64bb87222db0555b.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
----
-prelude: >
-  With support for routed networks, several options are deprecated and the
-  way undercloud networking is defined in the configuration file has several
-  changes. Please refer to the **Deprecation Notes** and **Upgrade notes**
-  section for details.
-features:
-  - |
-    Routed networks support adds the ability to configure Ironic Inspector
-    and the Neutron provisioning network in the undercloud to enable
-    provisioning of nodes via DHCP-relay to the undercloud from remote routed
-    network segments. Routed networks are disabled by default; to enable
-    them, set the option ``enable_routed_networks`` to ``True`` in
-    ``undercloud.conf``.
-
-    .. Note:: Changing the ``enable_routed_networks`` option after the
-       initial undercloud installation is not possible.
-upgrade:
-  - |
-    With support for routed network segments, several options are deprecated
-    and the way undercloud networking is defined in the configuration file
-    has several changes.
-
-    **New option:** ``subnets`` A list of subnets. One entry for each routed
-    network segment used for provisioning and introspection. For each network
-    segment a section/group needs to be added to the configuration file
-    specifying the following subnet options:
-
-    ====================== ================================================
-    option                 Description
-    ====================== ================================================
-    ``cidr``               Network CIDR for the subnet.
-    ``dhcp_start``         Start of DHCP allocation range for PXE and DHCP.
-    ``dhcp_end``           End of DHCP allocation range for PXE and DHCP.
-    ``inspection_iprange`` Temporary IP range that will be given to nodes
-                           during the inspection process.
-    ``gateway``            Network(subnet) gateway/router.
-    ``masquerade``         (Boolean) If ``True`` the undercloud will
-                           masquerade this network for external access.
-    ====================== ================================================
-
-    **New option:** ``local_subnet`` The name of the local subnet, where the
-    PXE boot and DHCP interfaces for overcloud instances are located. The IP
-    address of the local_ip/local_interface should reside in this subnet.
-
-    .. Note:: Upgrade with migration to routed networks support is not
-       possible.
-
-       Routed networks use the neutron segments service_plugin. This plugin
-       adds functionality that allows a subnet to be associated with a
-       network segment. It is currently not possible to add segment
-       association to an existing subnet; because of this we cannot add
-       segment association to the existing ctlplane subnet on the upgraded
-       undercloud. The existing ctlplane network and subnet will still be in
-       place after an upgrade and the upgraded undercloud can continue to
-       manage the existing overcloud.
-
-    The following example shows what changes to make to the configuration to
-    move to the new model.
-
-    Replace usage of deprecated options::
-
-      [DEFAULT]
-      network_gateway = 192.168.24.1
-      network_cidr = 192.168.24.0/24
-      dhcp_start = 192.168.24.5
-      dhcp_end = 192.168.24.24
-      inspection_iprange = 192.168.24.100,192.168.24.120
-      masquerade_network = 192.168.24.0/24
-
-    replace with::
-
-      [DEFAULT]
-      subnets = subnet0
-      local_subnet = subnet0
-
-      [subnet0]
-      cidr = 192.168.24.0/24
-      dhcp_start = 192.168.24.5
-      dhcp_end = 192.168.24.24
-      inspection_iprange = 192.168.24.100,192.168.24.120
-      gateway = 192.168.24.1
-      masquerade = True
-deprecations:
-  - With support for routed networks/subnets the ``network_gateway`` option in
-    the ``[DEFAULT]`` section is deprecated. Moved to the per-subnet options
-    group.
-  - With support for routed networks/subnets the ``network_cidr`` option in
-    the ``[DEFAULT]`` section is deprecated. Moved to the per-subnet options
-    group.
-  - With support for routed networks/subnets the ``dhcp_start`` and
-    ``dhcp_end`` options in the ``[DEFAULT]`` section are deprecated. Moved to
-    the per-subnet options group.
-  - With support for routed networks/subnets the ``inspection_iprange`` option
-    in the ``[DEFAULT]`` section is deprecated. Moved to the per-subnet
-    options group.
-  - With support for routed networks/subnets the ``masquerade_network``
-    option in the ``[DEFAULT]`` section is deprecated. Use the boolean option
-    in each subnet group.
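To make the new layout above concrete, here is a hedged sketch reading the subnet0 example back with configparser. The real undercloud loads its configuration with oslo.config; this only illustrates the structure::

    from six.moves import configparser

    conf = configparser.ConfigParser()
    conf.read('undercloud.conf')

    # 'subnets' is a comma-separated list; each entry names its own
    # section, as in the example above.
    subnets = [s.strip()
               for s in conf.get('DEFAULT', 'subnets').split(',')]
    for name in subnets:
        print(name, conf.get(name, 'cidr'), conf.get(name, 'gateway'))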
diff --git a/releasenotes/notes/run-ceilometer-gnocchi-upgrade-215cb426d25d11e9.yaml b/releasenotes/notes/run-ceilometer-gnocchi-upgrade-215cb426d25d11e9.yaml
deleted file mode 100644
index 954230501..000000000
--- a/releasenotes/notes/run-ceilometer-gnocchi-upgrade-215cb426d25d11e9.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Run ceilometer-upgrade conditionally when gnocchi is running so that
-    gnocchi resource types are created.
diff --git a/releasenotes/notes/set-dns-domain-08abe0d0fe7d2e65.yaml b/releasenotes/notes/set-dns-domain-08abe0d0fe7d2e65.yaml
deleted file mode 100644
index ae69104da..000000000
--- a/releasenotes/notes/set-dns-domain-08abe0d0fe7d2e65.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    The DNS domain for overcloud nodes can now be set in undercloud.conf
-    via the overcloud_domain_name option. The same value used for this option
-    must be passed to the overcloud deploy in the CloudDomain parameter.
diff --git a/releasenotes/notes/set-event-publishers-6d687ba1c4235a21.yaml b/releasenotes/notes/set-event-publishers-6d687ba1c4235a21.yaml
deleted file mode 100644
index 2f027850c..000000000
--- a/releasenotes/notes/set-event-publishers-6d687ba1c4235a21.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Explicitly set event pipeline publishers to panko and gnocchi to send the
-    events to both endpoints.
diff --git a/releasenotes/notes/set-valid-hosts-file-49d6aa96365908a7.yaml b/releasenotes/notes/set-valid-hosts-file-49d6aa96365908a7.yaml
deleted file mode 100644
index edea8c785..000000000
--- a/releasenotes/notes/set-valid-hosts-file-49d6aa96365908a7.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - When the hostname was written to /etc/hosts, it resulted in an invalid
-    /etc/hosts file due to 127.0.0.1 being specified twice on different lines.
-    That issue is now corrected such that the hostnames will be added to the
-    existing line for 127.0.0.1, which results in valid syntax for /etc/hosts.
-    See https://bugs.launchpad.net/tripleo/+bug/1709460
diff --git a/releasenotes/notes/stackrc-v3-1e4513172af13806.yaml b/releasenotes/notes/stackrc-v3-1e4513172af13806.yaml
deleted file mode 100644
index f7270040e..000000000
--- a/releasenotes/notes/stackrc-v3-1e4513172af13806.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - Update the undercloud stackrc to use the Keystone v3 API.
-    It updates OS_AUTH_URL to use a versionless endpoint, changes
-    OS_TENANT_NAME to OS_PROJECT_NAME, forces OS_IDENTITY_API_VERSION to 3 and
-    adds OS_PROJECT_DOMAIN_NAME pointing to the default domain.
diff --git a/releasenotes/notes/stop-using-mistral-env-41e6d19d999791dd.yaml b/releasenotes/notes/stop-using-mistral-env-41e6d19d999791dd.yaml
deleted file mode 100644
index 9cce8c296..000000000
--- a/releasenotes/notes/stop-using-mistral-env-41e6d19d999791dd.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-upgrade:
-  - |
-    The environment configuration for deployments is now stored in a
-    file called ``plan-environment.yaml`` that is stored in Swift
-    together with the templates. Mistral is no longer used to store
-    this data. ``openstack undercloud upgrade`` handles the migration
-    of existing plans automatically, including the deletion of the
-    Mistral environment.
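As context for the plan-environment migration above, fetching the file back out of Swift looks roughly like this. python-swiftclient is in lower-constraints.txt; the container name and pre-authenticated endpoint/token are placeholder assumptions, not documented values::

    from swiftclient import client as swift_client

    swift = swift_client.Connection(
        preauthurl='http://192.168.24.1:8080/v1/AUTH_placeholder',
        preauthtoken='placeholder-token')
    # get_object returns (headers, body) for the named object.
    headers, body = swift.get_object('overcloud', 'plan-environment.yaml')
    print(body)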
diff --git a/releasenotes/notes/swift-undercloud-logging-bf103e33fb444f01.yaml b/releasenotes/notes/swift-undercloud-logging-bf103e33fb444f01.yaml
deleted file mode 100644
index f53997862..000000000
--- a/releasenotes/notes/swift-undercloud-logging-bf103e33fb444f01.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Enforce a restart of rsyslog after installing Swift rpms. Otherwise all
-    Swift logs end up in /var/log/messages instead of /var/log/swift/swift.log.
diff --git a/releasenotes/notes/swift_zaqar-d476d1a8eb946776.yaml b/releasenotes/notes/swift_zaqar-d476d1a8eb946776.yaml
deleted file mode 100644
index 51b0d97ee..000000000
--- a/releasenotes/notes/swift_zaqar-d476d1a8eb946776.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - |
-    Use Swift as the backend for Zaqar. This effectively removes the need for
-    MongoDB on the undercloud.
diff --git a/releasenotes/notes/switch-to-hw-types-b3abf03ef9b7973b.yaml b/releasenotes/notes/switch-to-hw-types-b3abf03ef9b7973b.yaml
deleted file mode 100644
index 66626cd90..000000000
--- a/releasenotes/notes/switch-to-hw-types-b3abf03ef9b7973b.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-features:
-  - |
-    The hardware types ``ilo`` and ``idrac`` are now enabled by default.
-  - |
-    Added support for the following hardware types: ``cisco-ucs-managed``,
-    ``cisco-ucs-standalone``, ``idrac``, ``ilo``, ``irmc``, ``snmp``.
-deprecations:
-  - |
-    The ``enabled_drivers`` option is deprecated, please use
-    ``enabled_hardware_types``. Make sure to switch your nodes to hardware
-    types before disabling drivers.
-upgrade:
-  - |
-    Classic drivers are going to be deprecated soon; it is recommended to
-    switch all overcloud nodes to matching hardware types. See the `hardware
-    types migration documentation
-    `_
-    for the detailed procedure.
-  - |
-    As part of the migration to hardware types, the default value of the
-    ``discovery_default_driver`` option was changed from ``pxe_ipmitool``
-    to ``ipmi``.
diff --git a/releasenotes/notes/update-ps1-in-rc-files-ee0edbebcd75c6fc.yaml b/releasenotes/notes/update-ps1-in-rc-files-ee0edbebcd75c6fc.yaml
deleted file mode 100644
index dae902319..000000000
--- a/releasenotes/notes/update-ps1-in-rc-files-ee0edbebcd75c6fc.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - |
-    When sourcing the stackrc on the undercloud, the command prompt will show
-    that the credentials have been loaded by being prepended with
-    '(undercloud) '. For example, '(undercloud) [stack@undercloud ~]$ '
diff --git a/releasenotes/notes/update-ui-config-18c8701da3f7d3c1.yaml b/releasenotes/notes/update-ui-config-18c8701da3f7d3c1.yaml
deleted file mode 100644
index f6a8c8f14..000000000
--- a/releasenotes/notes/update-ui-config-18c8701da3f7d3c1.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-fixes:
-  - |
-    The TripleO UI now supports Keystone v3 and %(project_id)s
-    placeholders in URLs. Updated the endpoints in the configuration to
-    reflect this. (Fixes bug `bug 1692046
-    `__)
diff --git a/releasenotes/notes/validate-local-interface-faec300f80fadadf.yaml b/releasenotes/notes/validate-local-interface-faec300f80fadadf.yaml
deleted file mode 100644
index 681db372b..000000000
--- a/releasenotes/notes/validate-local-interface-faec300f80fadadf.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-fixes:
-  - |
-    Validate the local_interface for the undercloud install to fail fast if
-    the interface does not actually exist on the system. If net_config_override
-    is configured, the local_interface will not be validated. 
- - | - Log a warning if undercloud.conf is missing to indicate that the defaults - will be used. diff --git a/releasenotes/notes/wire-up-tripleo-validations-undercloud-upgrade.yaml-244b86a00b260888.yaml b/releasenotes/notes/wire-up-tripleo-validations-undercloud-upgrade.yaml-244b86a00b260888.yaml deleted file mode 100644 index 7fed25e5b..000000000 --- a/releasenotes/notes/wire-up-tripleo-validations-undercloud-upgrade.yaml-244b86a00b260888.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - Wires up execution of the "post-upgrade" group of tripleo-validations to - sanity-check the undercloud. The validations are executed at the - very end of the process, after the undercloud has been fully upgraded - and all services started in the upgraded versions. If there is an error, it - is logged but not raised, so these validations will not fail the upgrade. - The operator can set the existing 'enable_validations' option to false to - skip these validations. diff --git a/releasenotes/notes/wire_up_undercloud_debug-f6fd5d21dfbab696.yaml b/releasenotes/notes/wire_up_undercloud_debug-f6fd5d21dfbab696.yaml deleted file mode 100644 index e5bdc3288..000000000 --- a/releasenotes/notes/wire_up_undercloud_debug-f6fd5d21dfbab696.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - undercloud_debug is now wired up for additional OpenStack services. See - `bug 1669895 <https://bugs.launchpad.net/tripleo/+bug/1669895>`__ for more - information. diff --git a/releasenotes/notes/zaqar-httpd-a58c28f84541d482.yaml b/releasenotes/notes/zaqar-httpd-a58c28f84541d482.yaml deleted file mode 100644 index 4145f18e2..000000000 --- a/releasenotes/notes/zaqar-httpd-a58c28f84541d482.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - The Zaqar API now runs over httpd in the undercloud. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index db4045d0e..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,270 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files.
-#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'instack-undercloud Release Notes' -copyright = u'2017, TripleO Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. - -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# The full version, including alpha/beta/rc tags. - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'instack-undercloudReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'instack-undercloudReleaseNotes.tex', u'instack-undercloud Release Notes Documentation', - u'2016, TripleO Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'instack-undercloudreleasenotes', u'instack-undercloud Release Notes Documentation', - [u'2016, TripleO Developers'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files.
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'instack-undercloudReleaseNotes', u'instack-undercloud Release Notes Documentation', - u'2016, TripleO Developers', 'instack-undercloudReleaseNotes', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - -# openstackdocstheme options -repository_name = 'openstack/instack-undercloud' -bug_project = 'tripleo' -bug_tag = 'documentation' diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 4def7d3a2..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -============================================ -Welcome to instack-undercloud Release Notes! -============================================ - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - rocky - queens - pike - ocata - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42e..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst deleted file mode 100644 index e43bfc0ce..000000000 --- a/releasenotes/source/pike.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Pike Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst deleted file mode 100644 index 36ac6160c..000000000 --- a/releasenotes/source/queens.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Queens Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517b7..000000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 2334dd5cf..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - - .. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 3502afb44..000000000 --- a/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
-six>=1.10.0 # MIT -python-ironicclient>=2.3.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -python-novaclient>=9.1.0 # Apache-2.0 -python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -oslo.config>=5.2.0 # Apache-2.0 -oslo.utils>=3.33.0 # Apache-2.0 -psutil>=3.2.2 # BSD -netaddr>=0.7.18 # BSD -netifaces>=0.10.4 # MIT -pystache>=0.5.4 # MIT -os-refresh-config>=6.0.0 # Apache-2.0 -os-apply-config>=5.0.0 # Apache-2.0 -os-client-config>=1.28.0 # Apache-2.0 diff --git a/scripts/instack-haproxy-cert-update b/scripts/instack-haproxy-cert-update deleted file mode 100644 index 928d7e87d..000000000 --- a/scripts/instack-haproxy-cert-update +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -CERT_FILE="$1" -KEY_FILE="$2" -OUTPUT_FILE="$3" -REQUEST_NICKNAME="$4" - -if [[ -z "$CERT_FILE" || -z "$KEY_FILE" || -z "$OUTPUT_FILE" ]]; then - echo "You need to provide CERT_FILE KEY_FILE and finally OUTPUT_FILE" \ - "as arguments in that order." - exit 1 -fi -if [[ ! -f "$CERT_FILE" || ! -f "$KEY_FILE" ]]; then - echo "Certificate and key files must exist!" - exit 1 -fi -if [ -z "$REQUEST_NICKNAME" ]; then - echo "Request nickname must be specified in arguments." - exit 1 -fi - -cat $CERT_FILE $KEY_FILE > $OUTPUT_FILE -if systemctl -q is-active haproxy; then - systemctl reload haproxy -fi diff --git a/scripts/instack-install-undercloud b/scripts/instack-install-undercloud deleted file mode 100755 index da87e1dce..000000000 --- a/scripts/instack-install-undercloud +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python -c "from instack_undercloud import undercloud; undercloud.install('$(dirname $0)/..')" diff --git a/scripts/instack-pre-upgrade-undercloud b/scripts/instack-pre-upgrade-undercloud deleted file mode 100755 index 33617eda3..000000000 --- a/scripts/instack-pre-upgrade-undercloud +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python -c "from instack_undercloud import undercloud; undercloud.pre_upgrade()" diff --git a/scripts/instack-upgrade-undercloud b/scripts/instack-upgrade-undercloud deleted file mode 100755 index d7e0c9fe7..000000000 --- a/scripts/instack-upgrade-undercloud +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python -c "from instack_undercloud import undercloud; undercloud.install('$(dirname $0)/..', upgrade=True)" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index a2b444376..000000000 --- a/setup.cfg +++ /dev/null @@ -1,44 +0,0 @@ -[metadata] -name = instack-undercloud -summary = instack-undercloud -description-file = - README.md -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://git.openstack.org/cgit/openstack/instack-undercloud -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[files] -packages = - instack_undercloud - -scripts = - scripts/instack-install-undercloud - scripts/instack-pre-upgrade-undercloud - scripts/instack-upgrade-undercloud - scripts/instack-haproxy-cert-update - -data_files = - share/instack-undercloud/ = elements/* - share/instack-undercloud/json-files = json-files/* - share/instack-undercloud/ = undercloud.conf.sample - share/instack-undercloud/templates = templates/* - -[entry_points] -oslo.config.opts = - 
instack-undercloud = instack_undercloud.undercloud:list_opts - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d84432..000000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/templates/config.json.template b/templates/config.json.template deleted file mode 100644 index c2172e8e7..000000000 --- a/templates/config.json.template +++ /dev/null @@ -1,41 +0,0 @@ -{ - "hiera": { - "hierarchy": [ - {{#HIERADATA_OVERRIDE}} - "{{.}}", - {{/HIERADATA_OVERRIDE}} - "\"%{::operatingsystem}\"", - "\"%{::osfamily}\"", - "puppet-stack-config" - ]}, - "local-ip": "{{LOCAL_IP}}", - "local-ip-wrapped": "{{LOCAL_IP_WRAPPED}}", - "masquerade_networks": {{MASQUERADE_NETWORKS}}, - "service_certificate": "{{UNDERCLOUD_SERVICE_CERTIFICATE}}", - "public_host": "{{UNDERCLOUD_PUBLIC_HOST}}", - "admin_password": "{{UNDERCLOUD_ADMIN_PASSWORD}}", - "neutron": { - "dhcp_start": "{{DHCP_START}}", - "dhcp_end": "{{DHCP_END}}", - "network_cidr": "{{NETWORK_CIDR}}", - "network_gateway": "{{NETWORK_GATEWAY}}" - }, - "inspection": { - "interface": "{{DISCOVERY_INTERFACE}}", - "iprange": "{{DISCOVERY_IPRANGE}}", - "runbench": "{{DISCOVERY_RUNBENCH}}" - }, - "os_net_config": { - {{> net_config}} - }, - "keystone": { - "host": "{{LOCAL_IP}}" - }, - "ironic": { - "service-password": "{{UNDERCLOUD_IRONIC_PASSWORD}}" - }, - "bootstrap_host": { - "bootstrap_nodeid": "undercloud", - "nodeid": "undercloud" - } -} diff --git a/templates/net-config.json.template b/templates/net-config.json.template deleted file mode 100644 index ffc04d548..000000000 --- a/templates/net-config.json.template +++ /dev/null @@ -1,25 +0,0 @@ -"network_config": [ - { - "type": "ovs_bridge", - "name": "br-ctlplane", - "ovs_extra": [ - "br-set-external-id br-ctlplane bridge-id br-ctlplane" - ], - "members": [ - { - "type": "interface", - "name": "{{LOCAL_INTERFACE}}", - "primary": "true", - "mtu": {{LOCAL_MTU}}, - "dns_servers": {{UNDERCLOUD_NAMESERVERS}} - } - ], - "addresses": [ - { - "ip_netmask": "{{PUBLIC_INTERFACE_IP}}" - } - ], - "routes": {{SUBNETS_STATIC_ROUTES}}, - "mtu": {{LOCAL_MTU}} -} -] diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 25a9112cc..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. 
Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -# Doc requirements -openstackdocstheme>=1.18.1 # Apache-2.0 -sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD - -hacking<0.11,>=0.10.0 - -coverage!=4.4,>=4.0 # Apache-2.0 -fixtures>=3.0.0 # Apache-2.0/BSD -python-subunit>=1.0.0 # Apache-2.0/BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=2.2.0 # MIT -mock>=2.0.0 # BSD -oslotest>=3.2.0 # Apache-2.0 -bashate>=0.5.1 # Apache-2.0 -reno>=2.5.0 # Apache-2.0 diff --git a/tools/releasenotes_tox.sh b/tools/releasenotes_tox.sh deleted file mode 100755 index 4fecfd929..000000000 --- a/tools/releasenotes_tox.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -rm -rf releasenotes/build - -sphinx-build -a -E -W \ - -d releasenotes/build/doctrees \ - -b html \ - releasenotes/source releasenotes/build/html -BUILD_RESULT=$? - -UNCOMMITTED_NOTES=$(git status --porcelain | \ - awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}') - -if [ "${UNCOMMITTED_NOTES}" ] -then - cat <<EOF - -REMINDER: The following changes to release notes have not been committed: - -${UNCOMMITTED_NOTES} - -While the publication of release notes will pass despite this error, the -notes will only be published when the changes are committed. - -EOF -fi - -exit ${BUILD_RESULT} diff --git a/undercloud.conf.sample b/undercloud.conf.sample deleted file mode 100644 --- a/undercloud.conf.sample +++ /dev/null -[DEFAULT] - -# -# From instack-undercloud -# - -# IP information for the interface on the Undercloud that will be -# handling the PXE boots and DHCP for Overcloud instances. The IP -# portion of the value will be assigned to the network interface -# defined by local_interface, with the netmask defined by the prefix -# portion of the value. (string value) -#local_ip = 192.168.24.1/24 - -# Virtual IP or DNS address to use for the public endpoints of -# Undercloud services. Only used with SSL. (string value) -# Deprecated group/name - [DEFAULT]/undercloud_public_vip -#undercloud_public_host = 192.168.24.2 - -# Virtual IP or DNS address to use for the admin endpoints of -# Undercloud services. Only used with SSL. (string value) -# Deprecated group/name - [DEFAULT]/undercloud_admin_vip -#undercloud_admin_host = 192.168.24.3 - -# DNS nameserver(s) to use for the undercloud node. (list value) -#undercloud_nameservers = - -# List of ntp servers to use. (list value) -#undercloud_ntp_servers = - -# DNS domain name to use when deploying the overcloud. The overcloud -# parameter "CloudDomain" must be set to a matching value. (string -# value) -#overcloud_domain_name = localdomain - -# List of routed network subnets for provisioning and introspection. -# Comma-separated list of names/tags. For each network a section/group -# needs to be added to the configuration file with these parameters -# set: cidr, dhcp_start, dhcp_end, inspection_iprange, gateway and -# masquerade. Note: The section/group must be placed before or after -# any other section. (See the example section [ctlplane-subnet] in the -# sample configuration file.) (list value) -#subnets = ctlplane-subnet - -# Name of the local subnet, where the PXE boot and DHCP interfaces for -# overcloud instances are located. The IP address of the -# local_ip/local_interface should reside in this subnet. (string -# value) -#local_subnet = ctlplane-subnet - -# Certificate file to use for OpenStack service SSL connections. -# Setting this enables SSL for the OpenStack API endpoints; leaving it -# unset disables SSL. (string value) -#undercloud_service_certificate = - -# When set to True, an SSL certificate will be generated as part of -# the undercloud install and this certificate will be used in place of -# the value for undercloud_service_certificate. The resulting -# certificate will be written to -# /etc/pki/tls/certs/undercloud-[undercloud_public_host].pem.
This -# certificate is signed by the CA selected by the -# "certificate_generation_ca" option. (boolean value) -#generate_service_certificate = true - -# The certmonger nickname of the CA from which the certificate will be -# requested. This is used only if the generate_service_certificate -# option is set. Note that if the "local" CA is selected, the -# certmonger's local CA certificate will be extracted to /etc/pki/ca- -# trust/source/anchors/cm-local-ca.pem and subsequently added to the -# trust chain. (string value) -#certificate_generation_ca = local - -# The kerberos principal for the service that will use the -# certificate. This is only needed if your CA requires a kerberos -# principal, e.g. with FreeIPA. (string value) -#service_principal = - -# Network interface on the Undercloud that will be handling the PXE -# boots and DHCP for Overcloud instances. (string value) -#local_interface = eth1 - -# MTU to use for the local_interface. (integer value) -#local_mtu = 1500 - -# DEPRECATED: Network that will be masqueraded for external access, if -# required. This should be the subnet used for PXE booting. (string -# value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: With support for routed networks, masquerading of the -# provisioning networks is moved to a boolean option for each subnet. -#masquerade_network = 192.168.24.0/24 - -# Path to hieradata override file. If set, the file will be copied -# under /etc/puppet/hieradata and set as the first file in the hiera -# hierarchy. This can be used to custom configure services beyond what -# undercloud.conf provides. (string value) -#hieradata_override = - -# Path to network config override template. If set, this template will -# be used to configure the networking via os-net-config. Must be in -# json format. Templated tags can be used within the template; see -# instack-undercloud/elements/undercloud-stack-config/net- -# config.json.template for example tags. (string value) -#net_config_override = - -# Network interface on which inspection dnsmasq will listen. If in -# doubt, use the default value. (string value) -# Deprecated group/name - [DEFAULT]/discovery_interface -#inspection_interface = br-ctlplane - -# Whether to enable extra hardware collection during the inspection -# process. Requires the python-hardware or python-hardware-detect package -# on the introspection image. (boolean value) -#inspection_extras = true - -# Whether to run benchmarks when inspecting nodes. Requires -# inspection_extras set to True. (boolean value) -# Deprecated group/name - [DEFAULT]/discovery_runbench -#inspection_runbench = false - -# Makes ironic-inspector enroll any unknown node that PXE-boots the -# introspection ramdisk in Ironic. By default, the "ipmi" driver is -# used for new nodes (it is automatically enabled when this option is -# set to True). Set discovery_default_driver to override. -# Introspection rules can also be used to specify driver information -# for newly enrolled nodes. (boolean value) -#enable_node_discovery = false - -# The default hardware type to use for newly discovered nodes -# (requires enable_node_discovery set to True). It is automatically -# added to enabled_hardware_types. (string value) -#discovery_default_driver = ipmi - -# Whether to enable the debug log level for Undercloud OpenStack -# services. (boolean value) -#undercloud_debug = true - -# Whether to update packages during the Undercloud install.
(boolean -# value) -#undercloud_update_packages = true - -# Whether to install Tempest in the Undercloud. (boolean value) -#enable_tempest = true - -# Whether to install Telemetry services (ceilometer, gnocchi, aodh, -# panko) in the Undercloud. (boolean value) -#enable_telemetry = false - -# Whether to install the TripleO UI. (boolean value) -#enable_ui = true - -# Whether to install requirements to run the TripleO validations. -# (boolean value) -#enable_validations = true - -# Whether to install the Volume service. It is not currently used in -# the undercloud. (boolean value) -#enable_cinder = false - -# Whether to install the novajoin metadata service in the Undercloud. -# (boolean value) -#enable_novajoin = false - -# Whether to enable docker container images to be built on the -# undercloud. (boolean value) -#enable_container_images_build = true - -# Array of host/port combinations of docker insecure registries. -# (list value) -#docker_insecure_registries = - -# One Time Password to register the Undercloud node with an IPA server. -# Required when enable_novajoin = True. (string value) -#ipa_otp = - -# Whether to use iPXE for deploy and inspection. (boolean value) -# Deprecated group/name - [DEFAULT]/ipxe_deploy -#ipxe_enabled = true - -# Maximum number of attempts the scheduler will make when deploying -# the instance. You should keep it greater than or equal to the number -# of bare metal nodes you expect to deploy at once to work around a -# potential race condition when scheduling. (integer value) -# Minimum value: 1 -#scheduler_max_attempts = 30 - -# Whether to clean overcloud nodes (wipe the hard drive) between -# deployments and after the introspection. (boolean value) -#clean_nodes = false - -# List of enabled bare metal hardware types (next generation drivers). -# (list value) -#enabled_hardware_types = ipmi,redfish,ilo,idrac - -# An optional docker 'registry-mirror' that will be configured in -# /etc/docker/daemon.json. (string value) -#docker_registry_mirror = - -# List of additional architectures enabled in your cloud environment. -# The list of supported values is: ppc64le (list value) -#additional_architectures = - -# Enable support for routed ctlplane networks. (boolean value) -#enable_routed_networks = false - - -[auth] - -# -# From instack-undercloud -# - -# Password used for the MySQL root user. If left unset, one will be -# automatically generated. (string value) -#undercloud_db_password = - -# Keystone admin token. If left unset, one will be automatically -# generated. (string value) -#undercloud_admin_token = - -# Keystone admin password. If left unset, one will be automatically -# generated. (string value) -#undercloud_admin_password = - -# Glance service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_glance_password = - -# Heat db encryption key (must be 16, 24, or 32 characters). If left -# unset, one will be automatically generated. (string value) -#undercloud_heat_encryption_key = - -# Heat service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_heat_password = - -# Heat cfn service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_heat_cfn_password = - -# Neutron service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_neutron_password = - -# Nova service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_nova_password = - -# Ironic service password.
If left unset, one will be automatically -# generated. (string value) -#undercloud_ironic_password = - -# Aodh service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_aodh_password = - -# Gnocchi service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_gnocchi_password = - -# Ceilometer service password. If left unset, one will be -# automatically generated. (string value) -#undercloud_ceilometer_password = - -# Panko service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_panko_password = - -# Ceilometer metering secret. If left unset, one will be automatically -# generated. (string value) -#undercloud_ceilometer_metering_secret = - -# Ceilometer snmpd read-only user. If this value is changed from the -# default, the new value must be passed in the overcloud environment -# as the parameter SnmpdReadonlyUserName. This value must be between 1 -# and 32 characters long. (string value) -#undercloud_ceilometer_snmpd_user = ro_snmp_user - -# Ceilometer snmpd password. If left unset, one will be automatically -# generated. (string value) -#undercloud_ceilometer_snmpd_password = - -# Swift service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_swift_password = - -# Mistral service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_mistral_password = - -# Rabbitmq cookie. If left unset, one will be automatically generated. -# (string value) -#undercloud_rabbit_cookie = - -# Rabbitmq password. If left unset, one will be automatically -# generated. (string value) -#undercloud_rabbit_password = - -# Rabbitmq username. If left unset, one will be automatically -# generated. (string value) -#undercloud_rabbit_username = - -# Heat stack domain admin password. If left unset, one will be -# automatically generated. (string value) -#undercloud_heat_stack_domain_admin_password = - -# Swift hash suffix. If left unset, one will be automatically -# generated. (string value) -#undercloud_swift_hash_suffix = - -# HAProxy stats password. If left unset, one will be automatically -# generated. (string value) -#undercloud_haproxy_stats_password = - -# Zaqar password. If left unset, one will be automatically generated. -# (string value) -#undercloud_zaqar_password = - -# Horizon secret key. If left unset, one will be automatically -# generated. (string value) -#undercloud_horizon_secret_key = - -# Cinder service password. If left unset, one will be automatically -# generated. (string value) -#undercloud_cinder_password = - -# Novajoin vendordata plugin service password. If left unset, one will -# be automatically generated. (string value) -#undercloud_novajoin_password = - - -[ctlplane-subnet] - -# -# From instack-undercloud -# - -# Network CIDR for the Neutron-managed subnet for Overcloud instances. -# (string value) -# Deprecated group/name - [DEFAULT]/network_cidr -#cidr = 192.168.24.0/24 - -# Start of DHCP allocation range for PXE and DHCP of Overcloud -# instances on this network. (string value) -# Deprecated group/name - [DEFAULT]/dhcp_start -#dhcp_start = 192.168.24.5 - -# End of DHCP allocation range for PXE and DHCP of Overcloud instances -# on this network. (string value) -# Deprecated group/name - [DEFAULT]/dhcp_end -#dhcp_end = 192.168.24.24 - -# Temporary IP range that will be given to nodes on this network -# during the inspection process. 
Should not overlap with the range -# defined by dhcp_start and dhcp_end, but should be in the same IP -# subnet. (string value) -# Deprecated group/name - [DEFAULT]/inspection_iprange -#inspection_iprange = 192.168.24.100,192.168.24.120 - -# Network gateway for the Neutron-managed network for Overcloud -# instances on this network. (string value) -# Deprecated group/name - [DEFAULT]/network_gateway -#gateway = 192.168.24.1 - -# The network will be masqueraded for external access. (boolean value) -#masquerade = false diff --git a/zuul.d/layout.yaml b/zuul.d/layout.yaml deleted file mode 100644 index 457d85bda..000000000 --- a/zuul.d/layout.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -- project: - templates: - - puppet-openstack-check-jobs - check: - jobs: - - tripleo-ci-centos-7-undercloud-oooq - - tripleo-ci-centos-7-undercloud-upgrades - gate: - queue: tripleo - jobs: - - tripleo-ci-centos-7-undercloud-oooq diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml deleted file mode 100644 index cc00ce1c9..000000000 --- a/zuul.d/project.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- project: - templates: - - check-requirements - - openstack-cover-jobs - - openstack-lower-constraints-jobs - - openstack-python-jobs - - openstack-python35-jobs - - openstack-python36-jobs - - release-notes-jobs-python3
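For reference, undercloud.conf options such as those in the deleted sample above are registered and read through oslo.config; setup.cfg wires up the real option set via the instack_undercloud.undercloud:list_opts entry point. A minimal sketch follows, with a few registrations that merely mirror sample names for illustration.

    # Minimal sketch of consuming undercloud.conf via oslo.config; the
    # registered options below are illustrative, not the project's full set.
    from oslo_config import cfg

    CONF = cfg.ConfigOpts()
    CONF.register_opts([
        cfg.StrOpt('local_ip', default='192.168.24.1/24'),
        cfg.StrOpt('local_interface', default='eth1'),
        cfg.ListOpt('subnets', default=['ctlplane-subnet']),
    ])
    CONF.register_opts([
        cfg.StrOpt('cidr', default='192.168.24.0/24'),
        cfg.BoolOpt('masquerade', default=False),
    ], group='ctlplane-subnet')

    # Expects an undercloud.conf in the current directory; defaults apply
    # to any option left commented out, as in the sample.
    CONF(['--config-file', 'undercloud.conf'])
    print(CONF.local_ip, CONF['ctlplane-subnet'].cidr)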
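The validate-local-interface release note earlier in this diff describes failing fast when local_interface does not exist on the system. With netifaces (listed in requirements.txt), such a check could look like the sketch below; the function name and error type are assumptions, not the repository's actual implementation.

    # Illustrative fail-fast interface check in the spirit of the
    # validate-local-interface note.
    import netifaces


    def validate_local_interface(name, net_config_override=None):
        # Per the release note, validation is skipped when
        # net_config_override is configured.
        if net_config_override:
            return
        available = netifaces.interfaces()
        if name not in available:
            raise RuntimeError(
                'local_interface %s not found on this system; available: %s'
                % (name, ', '.join(available)))


    validate_local_interface('eth1')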
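Finally, the deleted templates (config.json.template, net-config.json.template) use mustache-style tags such as {{LOCAL_INTERFACE}} and {{LOCAL_MTU}}; pystache, also in requirements.txt, can render such templates. A toy sketch with a made-up template fragment and context values (partials like {{> net_config}} would additionally need pystache.Renderer(partials=...)):

    # Toy sketch of mustache rendering with pystache; template and values
    # are illustrative only.
    import json
    import pystache

    template = '{"name": "{{LOCAL_INTERFACE}}", "mtu": {{LOCAL_MTU}}}'
    context = {'LOCAL_INTERFACE': 'eth1', 'LOCAL_MTU': 1500}

    rendered = pystache.render(template, context)
    print(json.loads(rendered))  # {'name': 'eth1', 'mtu': 1500}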