Retire openstack-chef: remove repo content

OpenStack-chef project is retiring
- https://review.opendev.org/c/openstack/governance/+/905279

This commit removes the content of this project's repository.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/909134
Change-Id: Idb3769e91814eb7703a3a4787f485fab1caa97f8
This commit is contained in:
Ghanshyam Mann 2024-02-15 14:17:43 -08:00
parent 763b9b5e18
commit 0a53c4a961
209 changed files with 8 additions and 7101 deletions

View File

@ -1,11 +0,0 @@
wfBM0OA4p1hsqM1tsRNCCFhcjDXP4lffgLvFNbTPgMjh6TZvLDHlrwiA41rqMzdI
SOzSsyO9TBlcwfinbyJcObMpLklSv3wNJ6oWEpa1e78YL998RXx3zmMOoT/SEfWI
ZFN/H5xe+YX4T8T3+W+rfjQH3GP0fTbNfOYtgDthsmtUF05LtBlgxxUXscGkG74D
qDO6f3K97Dcd07QFeMorwvrIBkYDtTyU90HqR1aksMD06fpA2LSGfrLu1ykVUUlb
rTi+O9tyMifSs9SLoJjnrDkeujh3UQXRr4vRwP9tkefEtbQE8OiRpRJ72kLPX2eE
FQGQKjKCk3edAknp7RHhzk66GbUUzY/H+FAJfjFHycIR1rLAHxIbJFToH4dKGj9w
jFkHxgnnGK8TPm7mwyrvo6obR8gBSbG+1xgMiA2/fNoLyuiSqtm21n/1/81QPKBc
FllWAaA8xR6l2Ji8JPoJ705Msk6183ODeN//vn2JvwxacUL9HWqI6Gy2SsETQ3yW
A6PzVVnKHpevco7TH9HhuhKIlpYbjWv4YMUlLAz/5PbbLiOJtDGB54o/6hQGqOP0
3jscQ0ciPjpLgwvQmQJztrB/Uvq6Un2moPLLygwQK0TYSNmM5+BMnB00/aK++y7x
mvymJcSxd48DBrA2NuFDhNBWwOcZSB1R/N50Vs/iq5I=

View File

@ -1,16 +0,0 @@
# See http://docs.opscode.com/config_rb_knife.html
# for more information on knife configuration options
# Resolve keys and cookbook paths relative to the directory holding this knife.rb.
current_dir = File.dirname(__FILE__)
log_level :info
log_location STDOUT
# Client identity and key used when talking to the Chef server.
node_name 'nodienode'
client_key "#{current_dir}/nodienode.pem"
validation_client_name 'chef-validator'
validation_key "#{current_dir}/validator.pem"
chef_server_url 'https://api.opscode.com/organizations/my_awesome_org'
cache_type 'BasicFile'
cache_options(path: "#{ENV['HOME']}/.chef/checksums")
# Search both the vendored cookbooks and the local site-cookbooks directories.
cookbook_path ["#{current_dir}/../cookbooks",
"#{current_dir}/../site-cookbooks"]
# Shared secret for encrypted data bags. This is a repo-known default;
# the README says to replace it before any real deployment.
knife[:secret_file] = "#{current_dir}/encrypted_data_bag_secret"

View File

@ -1,9 +0,0 @@
# Chef Delivery local-phase commands (invoked via `delivery local <phase>`).
[local_phases]
# Vendor cookbooks before testing; `-e integration` excludes that group.
unit = 'berks vendor -e integration cookbooks'
lint = 'cookstyle --display-cop-names --extra-details'
syntax = "./scripts/json_check.sh"
# The remaining phases are intentionally no-ops in this repository.
provision = "echo skipping"
deploy = "echo skipping"
smoke = "echo skipping"
functional = "echo skipping"
cleanup = "echo skipping"

23
.gitignore vendored
View File

@ -1,23 +0,0 @@
.DS_Store
.eggs/
.kitchen/
.kitchen.local.yml
.bundle
.vagrant
berks-cookbooks/
Berksfile.lock
clients/
# "./cookbooks/" is not a valid gitignore pattern (a literal "./" path
# component never matches); a leading "/" anchors the pattern to this
# directory instead.
/cookbooks/
Gemfile.lock
nodes/
vms/
.chef/nodienode.pem
.chef/validator.pem
.chef/local-mode-cache/
.project
.tox
.idea/*
.venv/
deploy-guide/build
doc/build
*.egg-info

View File

@ -1,69 +0,0 @@
---
# Test Kitchen configuration using the dokken (Docker) driver.
# NOTE(review): indentation below was reconstructed from a whitespace-mangled
# copy of this file — confirm nesting against the original before relying on it.
driver:
  name: dokken
  privileged: true
  chef_version: 14
  volumes:
    - /sys/fs/cgroup:/sys/fs/cgroup
    - /lib/modules:/lib/modules:ro
    - /run/openvswitch:/run/openvswitch

transport:
  name: dokken

provisioner:
  name: dokken
  encrypted_data_bag_secret_key_path: .chef/encrypted_data_bag_secret
  roles_path: roles
  environments_path: environments
  attributes:
    openstack:
      secret:
        key_path: /opt/kitchen/encrypted_data_bag_secret

verifier:
  name: inspec

platforms:
  - name: ubuntu-18.04
    driver:
      image: dokken/ubuntu-18.04
      pid_one_command: /bin/systemd
  - name: centos-7
    platform: rhel
    driver:
      image: dokken/centos-7
      pid_one_command: /usr/lib/systemd/systemd

suites:
  - name: default
    driver:
      # OpenStack service ports exposed from the container (kept quoted so
      # values such as '80' stay strings).
      ports:
        - '80'
        - '8080'
        - '443'
        - '3460'
        - '3306'
        - '5672'
        - '5000'
        - '35357'
        - '8774'
        - '8776'
        - '9292'
        - '9696'
        - '8000'
        - '8003'
        - '8004'
    run_list:
      - recipe[openstack_test]
      - role[allinone]
    provisioner:
      client_rb:
        environment: allinone
    verifier:
      inspec_tests:
        - test/tempest/default
    attributes:
      openstack:
        network:
          tun_network_bridge_interface: eth0

View File

@ -1,85 +0,0 @@
---
# Test Kitchen multinode configuration using the Vagrant driver:
# one controller plus two compute nodes in the "multinode" environment.
# NOTE(review): indentation below was reconstructed from a whitespace-mangled
# copy of this file — confirm nesting against the original before relying on it.
driver:
  name: vagrant

# NOTE(review): in the mangled source this mapping appears between `driver`
# and `provisioner`; it may originally have been nested under
# `provisioner.client_rb` — verify against the pre-retirement file.
client_rb:
  treat_deprecation_warnings_as_errors: true
  resource_cloning: false

provisioner:
  name: chef_zero
  # You may wish to disable always updating cookbooks in CI or other testing
  # environments.
  # For example:
  #   always_update_cookbooks: <%= !ENV['CI'] %>
  always_update_cookbooks: true
  product_name: chef
  product_version: 14
  # Copy secret to /tmp/kitchen on test VM. Kitchen tries to gather secrets
  # before any recipes had a chance to run -> we cannot use a recipe to put the
  # secrets file in place.
  encrypted_data_bag_secret_key_path: .chef/encrypted_data_bag_secret
  roles_path: roles
  environments_path: environments
  attributes:
    openstack:
      secret:
        key_path: /tmp/kitchen/encrypted_data_bag_secret

verifier:
  name: inspec

platforms:
  - name: ubuntu-18.04
    driver:
      box: bento/ubuntu-18.04
  - name: centos-7
    driver:
      box: bento/centos-7

suites:
  - name: compute1
    # NOTE(review): `customize` normally nests under `driver` for the Vagrant
    # driver; the mangled source shows it at suite level — confirm.
    customize:
      cpus: 2
      memory: 2048
    driver:
      network:
        - ["private_network", {ip: "192.168.100.61"}]
        - ["private_network", {ip: "192.168.101.61"}]
    provisioner:
      client_rb:
        environment: multinode
    run_list:
      - recipe[openstack_test]
      - role[multinode-compute]
  - name: compute2
    customize:
      cpus: 2
      memory: 2048
    driver:
      network:
        - ["private_network", {ip: "192.168.100.62"}]
        - ["private_network", {ip: "192.168.101.62"}]
    provisioner:
      client_rb:
        environment: multinode
    run_list:
      - recipe[openstack_test]
      - role[multinode-compute]
  - name: controller
    customize:
      cpus: 4
      memory: 8192
    driver:
      network:
        - ["private_network", {ip: "192.168.100.60"}]
        - ["private_network", {ip: "192.168.101.60"}]
    provisioner:
      client_rb:
        environment: multinode
    run_list:
      - recipe[openstack_test]
      - role[multinode-controller]
    verifier:
      inspec_tests:
        - test/tempest/default

View File

@ -1,4 +0,0 @@
---
# Cookstyle (RuboCop) cop overrides for this repository.
Chef/Modernize/FoodcriticComments:
  Enabled: true
Chef/Style/CopyrightCommentFormat:
  Enabled: true

View File

@ -1,198 +0,0 @@
---
# Zuul job and project-template definitions for openstack-chef CI.
# NOTE(review): indentation was reconstructed from a whitespace-mangled copy;
# content (names, parents, vars, job lists) is preserved verbatim.
- job:
    name: openstack-chef-delivery
    parent: base
    description: Run Chef Delivery tests with openstack-chef
    required-projects: openstack/openstack-chef
    pre-run: playbooks/pre-delivery.yaml
    run: playbooks/delivery.yaml
    timeout: 1800
    voting: false  # TODO(ramereth): Remove after this gets merged

- job:
    name: openstack-chef-integration-ubuntu
    parent: base
    nodeset: ubuntu-bionic
    description: Run integration tests with openstack-chef on Ubuntu Bionic
    required-projects: openstack/openstack-chef
    pre-run: playbooks/pre-integration.yaml
    run: playbooks/integration.yaml
    post-run: playbooks/post.yaml
    timeout: 5400
    voting: false  # TODO(ramereth): Remove after this gets merged

- job:
    name: openstack-chef-minimal-integration-ubuntu
    parent: base
    nodeset: ubuntu-bionic
    description: Run minimal integration tests with openstack-chef on Ubuntu Bionic
    required-projects: openstack/openstack-chef
    pre-run: playbooks/pre-integration.yaml
    run: playbooks/minimal.yaml
    post-run: playbooks/post.yaml
    timeout: 5400
    voting: false  # TODO(ramereth): Remove after this gets merged

- job:
    name: openstack-chef-integration-centos-stream-8
    parent: base
    nodeset: centos-8-stream
    description: Run integration tests with openstack-chef on CentOS
    required-projects: openstack/openstack-chef
    pre-run: playbooks/pre-integration.yaml
    run: playbooks/integration.yaml
    post-run: playbooks/post.yaml
    timeout: 5400
    voting: false  # TODO(ramereth): Remove after this gets merged

- job:
    name: openstack-chef-minimal-integration-centos-stream-8
    parent: base
    nodeset: centos-8-stream
    description: Run minimal integration tests with openstack-chef on CentOS
    required-projects: openstack/openstack-chef
    pre-run: playbooks/pre-integration.yaml
    run: playbooks/minimal.yaml
    post-run: playbooks/post.yaml
    timeout: 5400
    voting: false  # TODO(ramereth): Remove after this gets merged

# Variants: chef 16 / cinc client, per platform.
- job:
    name: openstack-chef16-minimal-integration-ubuntu
    parent: openstack-chef-minimal-integration-ubuntu
    vars:
      openstack_chef_client_version: 16

- job:
    name: openstack-chef-integration-centos-7
    parent: openstack-chef-integration-centos-stream-8
    nodeset: centos-7

- job:
    name: openstack-chef-minimal-integration-centos-7
    parent: openstack-chef-minimal-integration-centos-stream-8
    nodeset: centos-7

- job:
    name: openstack-chef16-minimal-integration-centos-7
    parent: openstack-chef-minimal-integration-centos-7
    vars:
      openstack_chef_client_version: 16

- job:
    name: openstack-chef16-minimal-integration-centos-stream-8
    parent: openstack-chef-minimal-integration-centos-stream-8
    vars:
      openstack_chef_client_version: 16

- job:
    name: openstack-cinc16-integration-ubuntu
    parent: openstack-chef-integration-ubuntu
    vars:
      openstack_chef_client_type: cinc
      openstack_chef_client_version: 16

- job:
    name: openstack-cinc-integration-ubuntu
    parent: openstack-chef-integration-ubuntu
    vars:
      openstack_chef_client_type: cinc

- job:
    name: openstack-cinc16-integration-centos-7
    parent: openstack-chef-integration-centos-7
    vars:
      openstack_chef_client_type: cinc
      openstack_chef_client_version: 16

- job:
    name: openstack-cinc16-integration-centos-stream-8
    parent: openstack-chef-integration-centos-stream-8
    vars:
      openstack_chef_client_type: cinc
      openstack_chef_client_version: 16

- job:
    name: openstack-cinc-integration-centos-7
    parent: openstack-chef-integration-centos-7
    vars:
      openstack_chef_client_type: cinc

- job:
    name: openstack-cinc-integration-centos-stream-8
    parent: openstack-chef-integration-centos-stream-8
    vars:
      openstack_chef_client_type: cinc

# Template consumed by the cookbook repos.
- project-template:
    name: openstack-chef-repo-jobs
    queue: openstack-chef
    check:
      jobs:
        - openstack-chef-delivery
        - openstack-chef-integration-centos-7
        - openstack-chef-integration-centos-stream-8
        - openstack-chef-integration-ubuntu
        - openstack-cinc16-integration-centos-7
        - openstack-cinc16-integration-centos-stream-8
        - openstack-cinc16-integration-ubuntu
        - openstack-cinc-integration-centos-stream-8
        - openstack-cinc-integration-centos-7
        - openstack-cinc-integration-ubuntu
    gate:
      jobs:
        - openstack-chef-delivery
        - openstack-chef-integration-centos-7
        - openstack-chef-integration-centos-stream-8
        - openstack-chef-integration-ubuntu
        - openstack-cinc16-integration-centos-7
        - openstack-cinc16-integration-centos-stream-8
        - openstack-cinc16-integration-ubuntu
        - openstack-cinc-integration-centos-7
        - openstack-cinc-integration-centos-stream-8
        - openstack-cinc-integration-ubuntu

# Template used by this repo itself (adds the minimal/chef16 variants).
- project-template:
    name: openstack-chef-jobs
    queue: openstack-chef
    check:
      jobs:
        - openstack-chef-delivery
        - openstack-chef-integration-centos-7
        - openstack-chef-integration-centos-stream-8
        - openstack-chef-integration-ubuntu
        - openstack-chef-minimal-integration-centos-7
        - openstack-chef-minimal-integration-centos-stream-8
        - openstack-chef-minimal-integration-ubuntu
        - openstack-chef16-minimal-integration-centos-7
        - openstack-chef16-minimal-integration-centos-stream-8
        - openstack-chef16-minimal-integration-ubuntu
        - openstack-cinc16-integration-centos-7
        - openstack-cinc16-integration-centos-stream-8
        - openstack-cinc16-integration-ubuntu
        - openstack-cinc-integration-centos-7
        - openstack-cinc-integration-centos-stream-8
        - openstack-cinc-integration-ubuntu
    gate:
      jobs:
        - openstack-chef-delivery
        - openstack-chef-integration-centos-7
        - openstack-chef-integration-centos-stream-8
        - openstack-chef-integration-ubuntu
        - openstack-chef-minimal-integration-centos-7
        - openstack-chef-minimal-integration-centos-stream-8
        - openstack-chef-minimal-integration-ubuntu
        - openstack-chef16-minimal-integration-centos-7
        - openstack-chef16-minimal-integration-centos-stream-8
        - openstack-chef16-minimal-integration-ubuntu
        - openstack-cinc16-integration-centos-7
        - openstack-cinc16-integration-centos-stream-8
        - openstack-cinc16-integration-ubuntu
        - openstack-cinc-integration-centos-7
        - openstack-cinc-integration-centos-stream-8
        - openstack-cinc-integration-ubuntu

- project:
    templates:
      - deploy-guide-jobs
      - publish-openstack-docs-pti
      - openstack-chef-repo-jobs

View File

@ -1,34 +0,0 @@
# Berksfile — resolves the OpenStack cookbooks this repository integrates.
source 'https://supermarket.chef.io'

solver :ruby, :required

# For each OpenStack cookbook, prefer a local sibling checkout when present;
# otherwise fetch it from opendev.org.
%w(
  bare-metal
  block-storage
  common
  compute
  dashboard
  dns
  identity
  image
  integration-test
  network
  ops-database
  ops-messaging
  orchestration
  telemetry
).each do |cookbook|
  if Dir.exist?("../cookbook-openstack-#{cookbook}")
    cookbook "openstack-#{cookbook}", path: "../cookbook-openstack-#{cookbook}"
  else
    cookbook "openstack-#{cookbook}", git: "https://opendev.org/openstack/cookbook-openstack-#{cookbook}"
  end
end

# openstackclient is named differently from the openstack-* cookbooks.
if Dir.exist?('../cookbook-openstackclient')
  cookbook 'openstackclient', path: '../cookbook-openstackclient'
else
  cookbook 'openstackclient', git: 'https://opendev.org/openstack/cookbook-openstackclient'
end

# Test fixture cookbook bundled inside this repo.
cookbook 'openstack_test', path: 'test/cookbooks/openstack_test'

View File

@ -1,35 +0,0 @@
Contributing
============
How To Get Started
------------------
If you would like to contribute to the development of OpenStack Chef
Cookbooks, you must follow the steps in this page:
https://docs.openstack.org/infra/manual/developers.html
Gerrit Workflow
---------------
Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow
documented at:
https://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs
----
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/openstack-chef
Contacts
--------
- Mailing list: [chef] on the OpenStack-Discuss mailing list
- IRC: ``#openstack-chef`` is our channel on irc.oftc.net
- Wiki: https://wiki.openstack.org/wiki/Chef/GettingStarted

201
LICENSE
View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,343 +1,10 @@
OpenStack Chef
==============
This project is no longer maintained.
.. image:: https://governance.openstack.org/badges/openstack-chef.svg
:target: https://governance.openstack.org/reference/tags/index.html
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
Testing framework for deploying OpenStack using Chef
====================================================
This is the testing framework for OpenStack deployed using `Chef`_. We
leverage this to test against our changes to our `cookbooks`_ to make
sure that you can still build a cluster from the ground up with any
changes we introduce.
This framework also gives us an opportunity to show different Reference
Architectures and a sane example on how to start with OpenStack using
Chef.
With the ``master`` branch of the cookbooks, which is currently tied to
the base OpenStack Train release, this supports deploying to Ubuntu
18.04 and CentOS 7 or 8 in monolithic, or allinone, and non-HA multinode
configurations with Neutron. The cookbooks support a fully HA
configuration, but we do not test for that as there are far numerous
paths to HA.
.. _Chef: https://www.chef.io
.. _cookbooks: https://wiki.openstack.org/wiki/Chef/GettingStarted
Prerequisites
-------------
- Chef 16 or higher
- `Chef Workstation`_ 21.10.640 or later
- `Vagrant`_ 2.0 or later with `VirtualBox`_ or some other provider
.. _Chef Workstation: https://downloads.chef.io/chef-workstation/
.. _Vagrant: https://www.vagrantup.com/downloads.html
.. _VirtualBox: https://www.virtualbox.org/wiki/Downloads
Getting the Code (this repo)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
$ git clone https://opendev.org/openstack/openstack-chef.git
$ cd openstack-chef
The OpenStack cookbooks by default use encrypted data bags for
configuring passwords. There are four data bags : ``user_passwords``,
``db_passwords``, ``service_passwords``, ``secrets``. There already
exists a ``data_bags/`` directory, so you shouldn't need to create any
for a proof of concept. If you do, something is wrong. See the
`Data Bags`_ section for the gory details.
Supported Deployments
---------------------
For each deployment model, there is a corresponding file in the ``doc/``
directory. Please review that for specific details and additional setup
that might be required before deploying the cloud.
Kitchen Deploy Commands
-----------------------
These commands will produce various OpenStack cluster configurations,
the simplest being a monolithic Compute Controller with Neutron
(allinone) which contains all supported cookbooks being deployed.
Due to memory constraints with our CI environment, we also have the
minimal suite which only tests keystone, glance, neutron, cinder and
nova. Running this suite should duplicate what we do in our CI pipeline.
We also have individual suites for each cookbook if you are just working
on a single cookbook and don't require the allinone suite.
These deployments are not intended to be production-ready, and will need
adaptation to your environment. This is intended for development and
proof of concept deployments.
Kitchen Test Scenarios
----------------------
Initialize Chef Workstation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
$ eval "$(chef shell-init bash)"
Everything self-contained (allinone)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
# allinone with all supported cookbooks
$ kitchen test "default-(centos|ubuntu)"
Access the machine
~~~~~~~~~~~~~~~~~~
.. code-block:: console
$ kitchen login [centos|ubuntu]
$ sudo su -
Multiple nodes (non-HA)
~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
# Multinode with Neutron (1 controller + 2 compute nodes)
$ export KITCHEN_YAML=.kitchen.multi.yml
$ kitchen converge [centos|ubuntu|all]
$ kitchen verify [centos|ubuntu|all]
$ kitchen destroy [centos|ubuntu|all]
Access the Controller
~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
$ kitchen login controller-[centos|ubuntu]
$ sudo su -
Access the Compute nodes
~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
$ cd vms
$ kitchen login compute1
# OR
$ kitchen login compute2
$ sudo su -
Testing The Controller
~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
# Access the controller as noted above
$ source /root/openrc
$ nova --version
$ openstack service list && openstack hypervisor list
$ openstack image list
$ openstack user list
$ openstack server list
Working With Security Groups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To allow SSH access to instances, a security group is defined as
follows:
.. code-block:: console
$ openstack security group list
$ openstack security group list default
$ openstack security group create allow_ssh --description "allow ssh to instances"
$ openstack security group rule create allow_ssh --protocol tcp --dst-port 22:22 --remote-ip 0.0.0.0/0
$ openstack security group list allow_ssh
Working With Keys
~~~~~~~~~~~~~~~~~
To allow SSH keys to be injected into instance, a key pair is defined as
follows:
.. code-block:: console
# generate a new key pair
$ openstack keypair create mykey > mykey.pem
$ chmod 600 mykey.pem
$ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
# verify the key pair has been imported
$ openstack keypair list
Booting up a cirros image on the Controller
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: console
$ openstack server create --flavor 1 --image cirros --security-group allow_ssh --key-name mykey test
Wait a few seconds and then run ``openstack server list``. If Status is
not Active, wait a few seconds and repeat.
Once status is active you should be able to log in using SSH, or
``vagrant ssh <vm_name>``
.. code-block:: console
$ ssh cirros@<ip address from openstack server list output>
Accessing The OpenStack Dashboard
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you would like to use the OpenStack dashboard you should go to
https://localhost:9443 and the username and password is
``admin/mypass``.
Verifying OpenStack With Tempest
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you log in to the ``controller`` machine you can test via the most
recent `Tempest`_ release.
.. _Tempest: https://opendev.org/openstack/tempest
.. code-block:: console
$ cd vms
$ vagrant ssh <controller>
$ sudo su -
root@controller:~ cd /opt/tempest
root@controller:/opt/tempest$ ./run_tempest.sh -V --smoke --serial
[-- snip --]
tempest.tests.test_wrappers.TestWrappers
test_pretty_tox 1.68
test_pretty_tox_fails 1.03
test_pretty_tox_serial 0.61
test_pretty_tox_serial_fails 0.55
Ran 233 tests in 13.869s
OK
Running flake8 ...
$
Cleanup
-------
To remove all the nodes and start over again with a different
environment or different environment attribute overrides, using the
following rake command.
.. code-block:: console
$ chef exec rake destroy_machines
To refresh all cookbooks, use the following commands.
.. code-block:: console
$ rm -rf cookbooks
$ chef exec rake berks_vendor
To clean up everything, use the following rake command.
.. code-block:: console
$ chef exec rake clean
Data Bags
---------
Some basic information about the use of data bags within this repo.
.. code-block:: console
# Show the list of data bags
$ chef exec knife data bag list -z
db_passwords
secrets
service_passwords
user_passwords
# Show the list of data bag items
$ chef exec knife data bag show db_passwords -z
cinder
dash
glance
horizon
keystone
neutron
nova
# Show contents of data bag item
$ chef exec knife data bag show db_passwords nova -z
Encrypted data bag detected, decrypting with provided secret.
nova: mypass
id: nova
# Update contents of data bag item
# set EDITOR env var to your editor. eg. EDITOR=vi
$ chef exec knife data bag edit secrets dispersion_auth_user -z
Data Bag Default Values
~~~~~~~~~~~~~~~~~~~~~~~
``db_passwords`` are set to "mypass" secrets are set to
``token_service`` passwords are set to "mypass" ``user_passwords`` are
set to "mypass"
Default Encrypted Data Bag Secret
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The default secret is stored here ``.chef/encrypted_data_bag_secret``
and referenced by ``.chef/knife.rb``.
When we say defaults, we mean that they are known by everyone with
access to this repository. Change these to something else before
deploying for real.
Known Issues and Workarounds
----------------------------
Windows Platform
~~~~~~~~~~~~~~~~
When using this on a Windows platform, here are some tweaks to make this
work:
- In order to get SSH to work, you will need an SSL client installed.
You can use the one that comes with `Git for Windows`_. You will need
to append ``C:\Program Files (x86)\Git\bin;`` to the system PATH.
.. _Git for Windows: http://git-scm.com/download
TODOs
-----
- Support for floating IPs
- Better instructions for multi-node network setup
- Easier debugging. Maybe a script to pull the logs from the controller.
License
=======
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
::
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

226
Rakefile
View File

@ -1,226 +0,0 @@
# Resolve paths relative to this Rakefile so rake can be run from anywhere.
current_dir = File.dirname(__FILE__)
# Options passed to every local-mode (-z) chef-client run in the
# integration task; configuration comes from the checked-in knife.rb.
client_opts = "--chef-license accept --force-formatter --no-color -z --config #{current_dir}/.chef/knife.rb"
# Plain `rake` runs the gate test suite.
task default: ['test']
desc 'Default gate tests to run'
task test: %i(rubocop berks_vendor json_check)
# Execute +command+ in a shell, preferring the omnibus Chef install.
#
# If a full Chef install is present (/opt/chef/bin/chef-client exists),
# the command is run with Chef's embedded Ruby bin dir prepended to PATH
# so the embedded gems are used; otherwise it falls back to `chef exec`.
# The exact command line is echoed before it is executed.
def run_command(command)
  full_command =
    if File.exist?('/opt/chef/bin/chef-client')
      "PATH=/opt/chef/embedded/bin:$PATH #{command}"
    else
      "chef exec #{command}"
    end
  puts full_command
  sh %(#{full_command})
end
# Remove generated lockfiles and the vendored cookbooks/ directory so the
# next berks_vendor run starts from a clean slate.
task :destroy_all do
  run_command('rm -rf Gemfile.lock && rm -rf Berksfile.lock && rm -rf cookbooks/')
end
desc 'Vendor your cookbooks/'
task :berks_vendor do
  # CHEF_MINIMAL=yes vendors from this repo's own Berksfile; otherwise the
  # Berksfile of the project under test (PROJECT_DIR env var) is used.
  # NOTE(review): assumes PROJECT_DIR is set in the non-minimal case — a
  # nil value would raise NoMethodError on `+`; confirm CI always exports it.
  if ENV['CHEF_MINIMAL'] == 'yes'
    run_command('berks vendor cookbooks')
  else
    berksfile = ENV['PROJECT_DIR'] + '/Berksfile'
    run_command("berks vendor -b #{berksfile} #{current_dir}/cookbooks")
  end
end
# Generate a throwaway validator key for local-mode runs. An existing
# .chef/validator.pem is left untouched.
desc 'Create Chef Key'
task :create_key do
  key_file = '.chef/validator.pem'
  next if File.exist?(key_file)
  require 'openssl'
  File.binwrite(key_file, OpenSSL::PKey::RSA.new(2048).to_pem)
end
desc 'Blow everything away'
task clean: [:destroy_all]
# CI tasks
require 'cookstyle'
require 'rubocop/rake_task'
# `rake rubocop` — cookstyle lint, with cop names shown in offense output.
desc 'Run RuboCop'
RuboCop::RakeTask.new do |task|
  task.options << '--display-cop-names'
end
# Parse every JSON file under data_bags/*, environments/ and roles/ so
# malformed JSON fails the gate early (JSON.parse raises on bad input).
desc 'Validate data bags, environments and roles'
task :json_check do
  require 'json'
  ['data_bags/*', 'environments', 'roles'].each do |sub_dir|
    Dir.glob("#{sub_dir}/*.json") do |env_file|
      puts "Checking #{env_file}"
      JSON.parse(File.read(env_file))
    end
  end
end
# Run a table of commands, printing a banner before and after.
#
# +commands+ maps an executable to an array of argument strings; each
# command/argument pair is run once. When +openstack+ is true the command
# runs as root with /root/openrc sourced (OpenStack CLI credentials);
# otherwise it runs as-is.
def _run_commands(desc, commands, openstack = true)
  puts "## Running #{desc}"
  commands.each do |command, options|
    options.each do |option|
      invocation = "#{command} #{option}"
      if openstack
        sh %(sudo bash -c '. /root/openrc && #{invocation}')
      else
        sh invocation
      end
    end
  end
  puts "## Finished #{desc}"
end
# use the correct environment depending on platform
# Detect the distro family from which package-manager binary exists; the
# result drives the case branches in _run_env_queries and _save_logs.
# NOTE(review): on CentOS 8, /usr/bin/yum commonly exists as a dnf
# symlink, so the yum branch would match first and report 'centos7' —
# confirm the intended precedence.
if File.exist?('/usr/bin/apt-get')
  @platform = 'ubuntu18'
elsif File.exist?('/usr/bin/yum')
  @platform = 'centos7'
elsif File.exist?('/usr/bin/dnf')
  @platform = 'centos8'
end
# Helper for looking at the starting environment
# Logs basic system facts (kernel, cwd, env, chef-client and InSpec
# versions) plus a distro-specific set of network/repository queries.
# All queries run without OpenStack credentials (openstack = false).
def _run_env_queries
  _run_commands(
    'basic common env queries', {
      'uname' => ['-a'],
      'pwd' => [''],
      'env' => [''],
      '/opt/chef/bin/chef-client' => ['--chef-license accept --version'],
      '/opt/chef/bin/inspec' =>
        [
          'version --chef-license accept',
          'detect --chef-license accept',
        ],
    },
    false
  )
  # Distro-specific queries keyed off the top-level platform detection.
  case @platform
  when 'ubuntu18'
    _run_commands(
      'basic debian env queries', {
        'ifconfig' => [''],
        'cat' => ['/etc/apt/sources.list'],
      },
      false
    )
  when 'centos7', 'centos8'
    _run_commands(
      'basic rhel env queries', {
        '/sbin/getenforce' => [''],
        '/usr/sbin/ip' => ['addr'],
        'cat' => ['/etc/yum.repos.d/*'],
      },
      false
    )
  end
end
# Capture system and per-service state under #{log_dir}/#{prefix} for
# post-mortem debugging of a converge pass.
def _save_logs(prefix, log_dir)
  # Brief pause so services can settle/flush before capturing state.
  sh %(sleep 25)
  sh %(mkdir -p #{log_dir}/#{prefix})
  sh %(sudo journalctl -l > #{log_dir}/#{prefix}/journalctl.log)
  # Listening-socket snapshot; ss lives in /bin on Debian-family and
  # /sbin on RHEL-family systems.
  case @platform
  when 'ubuntu18'
    sh %(sudo /bin/ss -tunlp > #{log_dir}/#{prefix}/netstat.log)
  when 'centos7', 'centos8'
    sh %(sudo /sbin/ss -tunlp > #{log_dir}/#{prefix}/netstat.log)
  end
  # Copy each service's /etc config and /var/log logs; `|| true` keeps
  # going for services that are not installed in this particular run.
  %w(
    apache2
    ceilometer
    cinder
    designate
    glance
    gnocchi
    heat
    httpd
    keystone
    mariadb
    mysql
    mysql-default
    neutron
    nova
    openvswitch
    rabbitmq
  ).each do |project|
    sh %(mkdir -p #{log_dir}/#{prefix}/#{project})
    sh %(sudo cp -rL /etc/#{project} #{log_dir}/#{prefix}/#{project}/etc || true)
    sh %(sudo cp -rL /var/log/#{project} #{log_dir}/#{prefix}/#{project}/log || true)
  end
end
# Full gate job: converge the node three times with chef-client in local
# mode (idempotency check), then run InSpec — and Tempest when present —
# saving logs after every stage whether it passed or failed.
# NOTE(review): assumes WORKSPACE and PROJECT_NAME env vars are set by CI;
# nil for either would raise before any converge starts.
desc 'Integration test on Infra'
task integration: %i(create_key berks_vendor) do
  log_dir = ENV['WORKSPACE'] + '/logs'
  sh %(mkdir #{log_dir})
  # Translates project name into shorter names with underscores
  project_name = ENV['PROJECT_NAME'].gsub('cookbook-openstack-', '').tr('-', '_')
  # Use special roles for openstack-chef and cookbook-openstackclient projects
  project_name =
    case project_name
    when 'openstack_chef'
      'minimal'
    when 'cookbook_openstackclient'
      'openstackclient'
    when 'integration_test'
      'integration'
    else
      project_name
    end
  if ENV['CHEF_MINIMAL'] == 'yes'
    # If CHEF_MINIMAL is set, then let's assume we're running the full minimal suite
    project_name = 'minimal'
  end
  # InSpec profile directory and the chef-client run list are both derived
  # from the (possibly remapped) project name.
  inspec_dir = 'test/integration/' + project_name.tr('_', '-') + '/inspec'
  run_list = "role[#{project_name}],role[#{project_name}_test]"
  # This is a workaround for allowing chef-client to run in local mode
  sh %(sudo mkdir -p /etc/chef && sudo cp .chef/encrypted_data_bag_secret /etc/chef/openstack_data_bag_secret)
  # Add a symlink in case we run cinc instead of chef
  sh %(sudo ln -s /etc/chef /etc/cinc)
  _run_env_queries
  # Three passes to ensure idempotency. prefer each to times, even if it
  # reads weird
  (1..3).each do |i|
    begin
      puts "####### Pass #{i}"
      # Kick off chef client in local mode, will converge OpenStack right on the gate job "in place"
      sh %(sudo chef-client #{client_opts} -E integration -r '#{run_list}' > #{log_dir}/chef-client-pass#{i}.txt 2>&1)
    rescue => e
      raise "####### Pass #{i} failed with #{e.message}"
    ensure
      # make sure logs are saved, pass or fail
      _save_logs("pass#{i}", log_dir)
      sh %(sudo chown -R $USER #{log_dir}/pass#{i})
      sh %(sudo chmod -R go+rx #{log_dir}/pass#{i})
    end
  end
  # Run InSpec & Tempest tests
  puts '## InSpec & Tempest'
  begin
    sh %(sudo /opt/chef/bin/inspec exec --no-color #{inspec_dir} --reporter=cli html:#{log_dir}/inspec.html)
    if File.exist?('/opt/tempest-venv/tempest.sh')
      # Run Tempest separately from InSpec due to no way of extending the command timeout beyond 600s
      # https://github.com/inspec/inspec/issues/3866
      sh %(sudo /opt/tempest-venv/tempest.sh)
    else
      puts 'Skipping Tempest tests...'
    end
  rescue => e
    raise "####### InSpec & Tempest failed with #{e.message}"
  ensure
    # make sure logs are saved, pass or fail
    _save_logs('inspec', log_dir)
    sh %(sudo chown -R $USER #{log_dir}/inspec)
    sh %(sudo chmod -R go+rx #{log_dir}/inspec)
  end
end

View File

@ -1,8 +0,0 @@
Testing the Openstack Cookbook Repo
===================================
Basic tests are ``rubocop`` and ``berks``. To run these tests use:
.. code-block:: console
$ chef exec rake

View File

@ -1,7 +0,0 @@
build-essential [platform:dpkg]
liblzma-dev [platform:dpkg]
xz-devel [platform:rpm]
zlib1g-dev [platform:dpkg]
zlib-devel [platform:rpm]
libselinux-utils [platform:rpm]
yum [platform:rpm]

View File

@ -1,10 +0,0 @@
{
"id": "horizon.key",
"horizon.key": {
"encrypted_data": "3f0zL64eSf4wlcjb0WqmKpOeP9v232NLo2H9r+kMHWaWTtzU/nd3/O91NTgf\nUL9dx+TrSVniwo32+Q68s9xnes+zNarcdEh7xuzmsKshXl3VfvTRQjsk1Qq5\n18ooFOvRBZzD3sPp+AbYavvzvZfEJY4jYcLIs4lami95KdNdziePm2bD1v5b\n2YcWgfmSuSnNI/LYuxaCx5qQvSCpFbr0YKSCURaGDnB+firpj6gom+pXvVf+\n2ao4gHEjnDtYliwBl7f1jh/LD3a3mmpiNEwEtAR+Jx34EIdo0I0TsIcRvpZ4\nLOHrSO8e0fwgCBg6lQywZg3ZZwT8g75YWyTcaZ3omdPTitUaygyY6HoM3K3R\nwJDD2mslHdH8stEbxNjy0Wz59hTN+rTf+AMr7hadUa5t36dERiQMTb2ua/iY\nNjQGa6Shl0YOp+q+FFEL78pAXcaXCEGhVyVkQ0yVzPeazAQFhHDuz4J8nNco\n9M3ZxoRkUKH2BcNFIlCf1VSPQaPc81Rs3lhy/9kjnKBMyD45iTcSPI2m8zkZ\n4hAv4PFVhkY7QVVsV4iYJadoA1w9MPQndM2Si27ovFYgCmMOvKpeFHvmuPb8\nvw0Oil0lNG9zV5J81sivKXoIFs84W8c3s06roBK8SARatVVGtIj3TNsjJ/IC\n1QsaOuM2vq6aNnANvM2NTwOwicnupE7hGu86ewrtukY1ag7gORRGmfU/A3sN\npSd8RORdMEGQ97ACS98KhAbnQ3nVq+c07JC50txRemzCkqUiVnl6Uqed5q6e\n3Qlfzqud4oWVc91ol/Q90+6ekpdSCONxbXpiglkca1slszaLh8nW2bp0zbYp\nFT61UzGGYDCSrR37dc33cbxieDFHNfpZa/iU1jf97azQokidytjnNEA/DhbB\nAkGCvwoOZRubjR56/sKYgGvz8meyWTY94RnAxEJrzFkrSybUZ8wkIPd9d8B8\nYtiWZNOS/rMM9L1fE9cRDQWrSVHLex4kJDVIG+DBvEMvXve2EaeqfwPymsHi\nRR4o9kSr4i8MxiH5we+Ko33rwQaQEplNAf5qt2st9grbfQYzsf81OBaUtF3L\n3GLACI/3CWUP7bVWfkMH9bo5Tv9KfsKlQgfLZ5Ehp+v57xGYSS/G+V0jZyFi\nwOPH88s6m715zNpH48h0qkgYJjpN/dQrSyAau1OZpY6xfY3NAPVolAOoXAkA\nnTbY6WZULBT5LO0B74XKzvdZtCe8rOqg+iyQIMSEPN3DBb+TJSzfc8m5qQyf\nQ/JUOSbCIpG/uvFFZblkiWeUYt6GDhsALhCbBYdRCv/9ZGmzUkznHBi0ScsD\nvPhkZ5MuTLTfAITP7TFaQDSirMn5Bnag8voddQR5P1ugiK4TCqVD9fuBYEck\nIHRwRPTFNxmvhDmK/5dTGQMCixo+HGdLPrz5w9AcsGQqDyRU1ZyMqL4HfXL7\nOnFrbEa5yPsZWvHETgn02SLrNXCdi3pPrrx5au3yHPp13xz9M8Hvj+a/h876\nU3irt9hxPolArJsmRkNfoUsg2tSxq8CXi/zSXftbTP9pAdgekem1aDxGdOrQ\ntiAUc8wM9Dfr0WvTlVXZZMxSPHjgT/QUL/f4XM3MP3AhgTCcTiVawiJntkQq\nYkq3AM3ZR1tIm6u+AyYy5f6F3qofozO07+6E+k7OyOO/0kpr/g7g5HxzBR2v\n4bcFYeN6mryj6hmC202zNe4wNsRJh/rs25s0/AaBbEX3BDtsES7EIIjMtmSp\nEAwfPjKFAISrv/WbCBsg+NgJAB4VSmD3AK0OZ2bEvn8QO0Y0PwPmc6nXmDut\nWoBcE84bmVM5CmtaZt8uIcHdCrDp4T/UMmvMae3QHTrOPX45OLQGvD1n0qDL\nRteC4cIhD0qYOPZszg7y1rKbjFMnc0RCbrPGBB6CrR4aWtjuf2anlqq4aPO
9\nNLxL6NMIJJjLoY9DOxUYGY7diPKp/lutb8zx6ZiEJkdPvTBz08PVAPfMPfay\ndURo0Bu9vvfd4lZw2qcgIpAR2S6wY/xuf2BTI4mySeJ8DiVZmuGt8tYgl9Ub\nRVjVXkGV4Sicz32oPW4MyYmG+/oV12xHLXTkWiWp7dZ2E9VF9zGzD9MuRSBr\n9HyJQZmuikRzLW97clFjEXYXYTBn7CrXkA8oH5dhJhWMYX0sd8OTY4KtTQvH\nd0TlzOF0FGChFe3YWfJUPWZFwN1oKR5rhDHMjXq2V6MnzGDTB2q6ad+UpAj3\nBULnrLfz/iLy78m1N5i8LwT1kE5gKZgC6D4yCm/NYFA3KG1WNBYaDarUS2Zs\nhNYY3CK5znhT\n",
"iv": "AV+8lbESrFCjMXig\n",
"auth_tag": "c6EySjqyezhwtoTOAmNFKg==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "horizon.pem",
"horizon.pem": {
"encrypted_data": "nlKZmdaoD3UcAeN9MRWG5JAP/9TihwjCWtznvohy8FrEMf5VaJjNW5z4kswa\n4y//i+6c4GgRtoGfJ9+aPvwyU+vk78qwetscMmY5EoZ+SlsSkRvWXSuLV0a6\n34hG3GPnaKgvq9+dzmv88kEyK6x95wGCdHJN3eGShjTvoLICc2n6Kwiof3FT\n+qql7UDCehTK3h/5RCai2h51eL20nC0H0+Af8saCRjMYs5U9wl2AFwqBaBaO\nQS7iCag0xWu6IGwAir6jlMDhkhUhpeo3cNu0tbYeQYrC9hSde7fk8ejhsGxi\nQzmdlSaVoEzYXJbRNbhRTLbB7odc4tZmZt1yX8v4xu/N62c33+eoqlvxyx3P\nwXrJNoO7JLo0hiO3t/pmvb2l9cnBupQReABML4fkXBP61IWSx97G0nGCTY8K\njnRaQdSwTFhysYgUyq/DiRkZq/jVf1pFcN+Gn6mKsicYOw8b117UuPyQ+3yS\nV3xC//ugh3C6VNEmpIA/768GUUzX/9VRR4unbTChSVI2TFzmyfmcKqKlY9ZK\nzvV4giGvyC8Sut2cvYV1pvQYHzKzXco97GyM34R3Nko8V7XoINx468IWA3e0\nzP3nCVJnZcRe6lhxL+im9MUueabHp6pJeN6mij2cUjUQjD6UZr1xC7EzfTeR\nVwxuDgU/SzI3HgDiBVNdeFQe/WJZS/9NuWlPZADeKqK+8iLRc7novhfCibyi\n07Ml7vkuMUqj5011+m7+RZVA+0RBJoVCIHimP33GekCkdock3MUJeaLAE0KR\nzJNYZ3jWy9FNGKjvYETt3kizHjaed4NoJm5lc3WIqvETc6l9d0GR0ch5LEry\n5l9kIQk8VEx/18xUIitgsjObLm/btIAYHPv5xFxy8WSSL85pwFcQGzso1y6D\nSZVTifmYKHjo6upmtGfAiBnU9rOF7tw2kzPqkdhYbQA0qL1jvu5oBnz7CfVN\ne46psv2c+HcLvNW7YOpSGEkGIYxDBU2ZqTol/Xgx+MlsHsP7+4lnpXEi6ITa\nImjYv0IEob+Uo1pEAZAEAB5ZFFtCAcakkG2kmF01HMKXb0GJfQijrHxGbcjB\naSfuL7vuYiw6Mn2tJJopBGHjWUt+uvNvYPNgrg7PzbtIo6Gw1AejjZTMb4Qe\nhz2OeFTqbGF6N0LR7Ci0bIBww+mZA7+nX2VFjNmCV1akAvEmyNfEVR6oemMU\naqS0vmSMzvz+d7lodECDii3fgIn1IbGLuzhZLNDlFKrvOihRVUsN+DQow6yb\niPkDvYj+UQbu56dimXaCJheNwp/d7ta+LD/s1DEelvYKMYVgM1djNYOHgPyL\nriUN581WaoprEX7CavK8UVpRe4NjhVaCuIUwYbpisI6HkinlIZx33Z5ObSR1\nKCiyim/BviADlfk0\n",
"iv": "zSD5P5yNXBwg1XMh\n",
"auth_tag": "+PTbpzCL8Z9CCdQlCBlpZg==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "aodh",
"aodh": {
"encrypted_data": "3ZAJfRHrUR52u9bIjaCZHJ8+41lFtMQ=\n",
"iv": "Bx1C+ROcknzL/AQi\n",
"auth_tag": "lhyZREBnQQwhY22a/ci9aw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "ceilometer",
"ceilometer": {
"encrypted_data": "4FgQ2zK7Pkev7JzeyRqihxQngH+tqh35jg==\n",
"iv": "k+T3QVpvWlWQ30Ut\n",
"auth_tag": "/NA2evvK7si0yxwmnRJ3og==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "cinder",
"cinder": {
"encrypted_data": "1by8qBeRo74m5W3MoqRDpeevrXcRj/vEUg==\n",
"iv": "/s2YnhagoS4L7mHK\n",
"auth_tag": "6Z3rDRZhJN/p1pP15lRvJA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "dash",
"dash": {
"encrypted_data": "FMorEJGnOtIrwq3ZIv/VWrCtcwk3TYvdWQ==\n",
"iv": "nKg1lu9brBNkKrmP\n",
"auth_tag": "5gYG8p5FkbIumuHcsTU6MQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "designate",
"designate": {
"encrypted_data": "rq7N+DifbtXHQiC8s0x8Z0jm5JodjtBJzg==\n",
"iv": "E5z9mnkCS0WG9f6W\n",
"auth_tag": "Q7nnYiGaRRfGEgHTznN10A==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "glance",
"glance": {
"encrypted_data": "lYZcqw2GLqLDglm9ONo0xk+CYhDocWLXAw==\n",
"iv": "uZLZMCwhJrc6R+6l\n",
"auth_tag": "JrQJQWBQDOa32p3bmAHK7Q==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "gnocchi",
"gnocchi": {
"encrypted_data": "0ANqZgd9RLtXx4cGhrgt6aYlXuq2b7d8cQ==\n",
"iv": "8VdizNsosQfKI4Uq\n",
"auth_tag": "G9E/N4P0kTkZbkJ7Yidneg==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "heat",
"heat": {
"encrypted_data": "KhZYFaqHl1+2BGPghfN8St1MvYJ9TR5zhg==\n",
"iv": "BtutaTvxERLrI5EH\n",
"auth_tag": "Y+hr4cMfo8huhbZrtEem5w==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "horizon",
"horizon": {
"encrypted_data": "JJ7qeWAKWjFlpkBJvdbCNLRISgCieLUR/A==\n",
"iv": "qTo/3JGrAhW5MnrP\n",
"auth_tag": "MrsnHjmayCq4GXsG0uXLSw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "ironic",
"ironic": {
"encrypted_data": "O9dpq41k90vdOtFGW60p13Mki6Hp8vu+EQ==\n",
"iv": "qP8LpWlAWdyH/jxe\n",
"auth_tag": "6yM7Z1sw4wpAGjwuvluEuA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "keystone",
"keystone": {
"encrypted_data": "QWQZ5jZzkxsysPC3B/6QPvYc5tza3rZOoA==\n",
"iv": "JQydX+XgeIoCaNHt\n",
"auth_tag": "exz+tonoekz8puDjdFd3QA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "mysqlroot",
"mysqlroot": {
"encrypted_data": "pY87wT67k8N3bwTC9rerphVa6yPv/eNnQQ==\n",
"iv": "QFUJNOQUAzv37RcS\n",
"auth_tag": "v0/GwZ2VQplMr7FPkseQuw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "neutron",
"neutron": {
"encrypted_data": "EaNzE0hWsOOKRUh1ODZafzJcKlcMiqtmDA==\n",
"iv": "mr9ad3WFuUjbtYeu\n",
"auth_tag": "rakN7H8CqWc6lyz7L98idA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "nova",
"nova": {
"encrypted_data": "MIFXMvRR5B2c7eVZEB5NgSTij0cnCQiI3w==\n",
"iv": "AlH0YzOhOQTSOM5z\n",
"auth_tag": "fMDkg7q+SvsBQ72Hezp/kg==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "nova_api",
"nova_api": {
"encrypted_data": "EhXSbK/GyMbON9NDITBH13PeYFCFIvKVhA==\n",
"iv": "hNeyA7TEIyEsLcrK\n",
"auth_tag": "Rbz4mBo5yJjc5ItQc/vt3w==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "nova_cell0",
"nova_cell0": {
"encrypted_data": "0Bf80Cqrg2aKQKSPVzxVg0u7sRWZgsfwMQ==\n",
"iv": "RGIPKZTPEq7P/f1O\n",
"auth_tag": "ALzLFK5eudDu09i7fFKhQQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "placement",
"placement": {
"encrypted_data": "Jsq+ipbfdaS9xz53Aq/iZeklVrMqGwhy1T3se7FBsSk=\n",
"hmac": "4wjFbHBVa4QBUGsJtMu9aSXUW+eczfO5FPb92G/o2c4=\n",
"iv": "3rIhywqadkq3curyUuKKYA==\n",
"version": 2,
"cipher": "aes-256-cbc"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "credential_key0",
"credential_key0": {
"encrypted_data": "+f9m4yySSddKQBtexpObWadwsTdLx/i3zGh7Wk6CMyMRA20ULV+78sGxdNVV\nBNkzYYE7hvRGfwooukLMR2A2\n",
"iv": "78lsGK/GVYn0LS0n\n",
"auth_tag": "MqUKL3mM2w9HFQIW52Mmrw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "credential_key1",
"credential_key1": {
"encrypted_data": "X4ZCiHUUqW2iciwUquvYTg3l2bcUfmOoRFw6/uiDoZhiv5FC12+glQUeD8Av\n4NqqnZW4uhc1pMxfGT+QIfMv\n",
"iv": "SRUjq+ioB90M8RW5\n",
"auth_tag": "NCKVBRmyJ/Zov8u6z+OdNA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"designate_rndc": {
"encrypted_data": "9rdQuzH6xux8LHdsEDmhih6yK6wWAEhBEc8/nOm0BAVd+GoQWHH1JWM1qdkq\npbxIUUEqNym0laAFVFyxUjFE\n",
"iv": "sLHnFUuEGfVuF/KQ\n",
"auth_tag": "8jKSLquqe/ciqzZF9ku33g==\n",
"version": 3,
"cipher": "aes-256-gcm"
},
"id": "designate_rndc"
}

View File

@ -1,10 +0,0 @@
{
"id": "dispersion_auth_key",
"dispersion_auth_key": {
"encrypted_data": "HeVQA/lKX4qNEgEFt/0vCA8Toc/LAUBwExXScbNEXxAMyMgpNlwHjXBMtQQ=\n",
"iv": "eu8zcZ8A5Oig5P/o\n",
"auth_tag": "SpMmWA3LHMp+PGtYkf/xfQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "dispersion_auth_user",
"dispersion_auth_user": {
"encrypted_data": "gHo7WaSOAObn8i+0+zcDUNfnTsjvUK2D4qsLBHWJ+H3/ogSfiEhEWZXnL7s=\n",
"iv": "NVAThQOlySVxRvAs\n",
"auth_tag": "RZ+zA4MrFU5OMJYiPMGKvg==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "fernet_key0",
"fernet_key0": {
"encrypted_data": "klHq/xf9EeFPVxvSsIVYz7DNx0ZdgYiADbgczDQhkQOpiQiaxq2wcYsshQU6\nzFJsWHk51n1aV6wom1yxr5EG\n",
"iv": "wGbJPBROV0BT0GKj\n",
"auth_tag": "/uKKmB8n1Sh7IvI38gdIAQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "fernet_key1",
"fernet_key1": {
"encrypted_data": "tUt6KhUckQiU4EaP30WEDSdGPjqHxUOg+a9XV92VCcZufMzmw4/37W0fBwcq\nQn5JEDWtCXMx/YSp5eycT8ca\n",
"iv": "EAn1lrxF0JzE5HDQ\n",
"auth_tag": "lPF5aYF7UPefftSHJxWN9w==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "neutron_metadata_secret",
"neutron_metadata_secret": {
"encrypted_data": "dx5dR20bfSpeOWnpNZOdXX3k55n/Um8rCyehVd8cVs5RmW+sAC9cm1qoKZKV\npXn1\n",
"iv": "w1H81ZmPqJi61d8G\n",
"auth_tag": "yOW31q48UYMJGNq0OvMgFQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack_identity_bootstrap_token",
"openstack_identity_bootstrap_token": {
"encrypted_data": "q+XJCRy2dcnHboSNCVrK5OR/IlAt+yqrclZ5rVKWJBhbqnMu8JRs0YrJ5QDp\nqQfOwQZQ1m4=\n",
"iv": "rMo0P2WbLfZ5jF0b\n",
"auth_tag": "p+O6G0A/W8zs0ybSdyUFoQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "orchestration_auth_encryption_key",
"orchestration_auth_encryption_key": {
"encrypted_data": "AvYQus6tO1Pb4WDeat/4RoCa+EjkIKX++C3/L9yJKD+tBhwv70lh4pTzvlUb\nyEn5zxn/\n",
"iv": "8gcTvD9MWLpDm2P7\n",
"auth_tag": "CCDQkdKJj4V8zyECammPTA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-aodh",
"openstack-aodh": {
"encrypted_data": "bWQZwHj/WzfyeJdmnwAUHii1dQWi8gXeMh6W/j5hB1RE\n",
"iv": "4XMES2igC/0KyGY+\n",
"auth_tag": "tbUxSd8tpdBVqsz8HNCyxg==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-bare-metal",
"openstack-bare-metal": {
"encrypted_data": "XungldtBZTJXKXxUezYe45a0dozxLgsXMA==\n",
"iv": "s3SrfIFb7qQv44gA\n",
"auth_tag": "tAIA0ww4wqCDWxpY3otvEQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-block-storage",
"openstack-block-storage": {
"encrypted_data": "s2yk+YVL4l7s65EOhLjYnrQBsLVCTVvs+g==\n",
"iv": "6NTjfnI0Wk554zoy\n",
"auth_tag": "HQyedh4UeMmuG2FbzFGA8w==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-compute",
"openstack-compute": {
"encrypted_data": "MSIjvrMJ8zoKynVmbqN6wGH3ajDjyRBQ1g==\n",
"iv": "C8e2ZiqoBMJmpOxG\n",
"auth_tag": "iwgLvTU87qwKp/QCdId0Rw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-dns",
"openstack-dns": {
"encrypted_data": "j0em/vC2AusGXFeTvDC8HfdSfemfIbnGHw==\n",
"iv": "2hS/AGOuQiU8xZG9\n",
"auth_tag": "R6MeBi0hJ815ReTxqWai3Q==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-image",
"openstack-image": {
"encrypted_data": "MrJFU9Q7tELsfnIwTV7KSqnpG4AcRJH5Vw==\n",
"iv": "5uuInaFjGEih389e\n",
"auth_tag": "N7vpAf4VVE3xeu8/CLDnEA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-network",
"openstack-network": {
"encrypted_data": "pMz/lN8yVtGPxx4k4pz3WevMBVDSmyZQ+Q==\n",
"iv": "T6r9Z1f9DEdteOYx\n",
"auth_tag": "goz/UtgFspuvVWYqkhCt+Q==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-orchestration",
"openstack-orchestration": {
"encrypted_data": "AONHb8jlwFnR6wYYUPs2UW9z8YPbPJRYSg==\n",
"iv": "38Q/5TBLSllprHIr\n",
"auth_tag": "pp7lNVqpAPTwyZTVicB/dA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-placement",
"openstack-placement": {
"encrypted_data": "7yqlY8nn6f9g5cYAIfFCSLm76myRQ5P8Ww==\n",
"iv": "3iTD7QItyaWeA1mS\n",
"auth_tag": "FJlBLrqR0vzQ4S4HqKQn5A==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-telemetry-metric",
"openstack-telemetry-metric": {
"encrypted_data": "juCRbZ2FSKbxW16ReEAYi/3Oh7iEuFDQhg==\n",
"iv": "0UxtUNC27DuIcwEK\n",
"auth_tag": "hUeVIaa2QwsrH5bRYK0COw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-telemetry",
"openstack-telemetry": {
"encrypted_data": "H7th9qXm+tLXZ7e2rIJ1aytuWEmCRIFqTg==\n",
"iv": "yBz5NfZQzHlWGZG8\n",
"auth_tag": "x3q2zr8xrA+xZL3bVSwO9w==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "openstack-telemetry_metric",
"openstack-telemetry_metric": {
"encrypted_data": "RimQJHhxsSnI9vHNPD43KWVbfojAKDzkQFHQq6xXehD+VgYh9lwNUT+DFvz/\n",
"iv": "tbYMioDdeXAs7XY+\n",
"auth_tag": "BV3qn/iDujKxEckGq/HW1Q==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "admin",
"admin": {
"encrypted_data": "rSGFmgUQACPxmJ2mXXtkPZeltLZTXVzgzg==\n",
"iv": "Gf5d3TwsTCv9eOqy\n",
"auth_tag": "E034utXmdnB36z0+2abBAA==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "guest",
"guest": {
"encrypted_data": "rwMD8pWaNwi6yAVJmOOm69gIw0k/U3R9gg==\n",
"iv": "OFXSz4YuieFSfGdL\n",
"auth_tag": "8pxujYDPebuGsrntDl8jAw==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
{
"id": "heat_domain_admin",
"heat_domain_admin": {
"encrypted_data": "IKsB83OypkqYkR89/Rfw4lCi3FjA72FWGw==\n",
"iv": "MTk0WWG+Yk85Z1C7\n",
"auth_tag": "+W4IH3x9+ZFCTl+ZTJF7LQ==\n",
"version": 3,
"cipher": "aes-256-gcm"
}
}

View File

@ -1,10 +0,0 @@
2. Edit the ``/etc/OpenStackChef/OpenStackChef.conf`` file and complete the following
actions:
* In the ``[database]`` section, configure database access:
.. code-block:: ini
[database]
...
connection = mysql+pymysql://OpenStackChef:OPENSTACKCHEF_DBPASS@controller/OpenStackChef

View File

@ -1,75 +0,0 @@
Prerequisites
-------------
Before you install and configure the OpenStack Chef service,
you must create a database, service credentials, and API endpoints.
#. To create the database, complete these steps:
* Use the database access client to connect to the database
server as the ``root`` user:
.. code-block:: console
$ mysql -u root -p
* Create the ``OpenStackChef`` database:
.. code-block:: none
CREATE DATABASE OpenStackChef;
* Grant proper access to the ``OpenStackChef`` database:
.. code-block:: none
GRANT ALL PRIVILEGES ON OpenStackChef.* TO 'OpenStackChef'@'localhost' \
IDENTIFIED BY 'OPENSTACKCHEF_DBPASS';
GRANT ALL PRIVILEGES ON OpenStackChef.* TO 'OpenStackChef'@'%' \
IDENTIFIED BY 'OPENSTACKCHEF_DBPASS';
Replace ``OPENSTACKCHEF_DBPASS`` with a suitable password.
* Exit the database access client.
.. code-block:: none
exit;
#. Source the ``admin`` credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. To create the service credentials, complete these steps:
* Create the ``OpenStackChef`` user:
.. code-block:: console
$ openstack user create --domain default --password-prompt OpenStackChef
* Add the ``admin`` role to the ``OpenStackChef`` user:
.. code-block:: console
$ openstack role add --project service --user OpenStackChef admin
* Create the OpenStackChef service entities:
.. code-block:: console
$ openstack service create --name OpenStackChef --description "OpenStack Chef" openstack chef
#. Create the OpenStack Chef service API endpoints:
.. code-block:: console
$ openstack endpoint create --region RegionOne \
openstack chef public http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
openstack chef internal http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
openstack chef admin http://controller:XXXX/vY/%\(tenant_id\)s

View File

@ -1,271 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Sphinx configuration for the Deployment Guide for OpenStack Chef
Cookbooks.

Stock openstackdocstheme-based setup; the uncommented assignments below
are the only deviations from the Sphinx defaults.
"""

# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# NOTE(review): `os` appears unused in this file; confirm before removing.
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# TODO(ajaeger): enable PDF building, for example add 'rst2pdf.pdfbuilder'
extensions = [
    'openstackdocstheme'
]

# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Deployment Guide for OpenStack Chef Cookbooks'
# openstackdocstheme settings: repo/bug metadata used by the theme.
openstackdocs_repo_name = 'openstack/openstack-chef'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'openstack-chef'
openstackdocs_bug_tag = u'deploy-guide'
copyright = u'2019, OpenStack contributors'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["common_prerequisites.rst", "common_configure.rst"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'deploy-guide'

# If true, publish source files
html_copy_source = False

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'InstallGuide.tex', u'Install Guide',
     u'OpenStack contributors', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'installguide', u'Install Guide',
     [u'OpenStack contributors'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'InstallGuide', u'Install Guide',
     u'OpenStack contributors', 'InstallGuide',
     'This guide shows OpenStack end users how to install '
     'an OpenStack cloud.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# -- Options for Internationalization output ------------------------------

locale_dirs = ['locale/']

# -- Options for PDF output --------------------------------------------------

pdf_documents = [
    ('index', u'InstallGuide', u'Install Guide',
     u'OpenStack contributors')
]

View File

@ -1,26 +0,0 @@
.. _deploy:
===============================
Deploy Chef OpenStack cookbooks
===============================
Deploy Chef OpenStack cookbooks, using Chef, and test the setup using
Tempest.
* Software requirements:
* Ubuntu 18.04 LTS or CentOS 7 or Stream 8 fresh install
* `git` installed
* `chef-client` installed: https://docs.chef.io/#setup
* Hardware requirements:
* At least 4GB of memory. 8GB or more is recommended.
* At least 10GB of storage.
.. code-block:: shell-session
# git clone https://opendev.org/openstack/openstack-chef
# cd openstack-chef
# mkdir -p /etc/chef && cp .chef/encrypted_data_bag_secret /etc/chef/openstack_data_bag_secret
# chef-client -z -E allinone -r 'role[allinone]'

View File

@ -1,70 +0,0 @@
=========
Genealogy
=========
- `Releases Summary`_
- `Supermarket Releases`_
- `How to release Chef cookbooks`_
Releases Summary
================
+----------------------------+------------------------------+------------------------+
| Module Version | OpenStack Version Codename | Community Supported |
+============================+==============================+========================+
| 7.y.z | Grizzly | no - EOL (2014-03-29) |
+----------------------------+------------------------------+------------------------+
| 8.y.z | Havana | no - EOL (2014-09-30) |
+----------------------------+------------------------------+------------------------+
| 9.y.z | Icehouse | no - EOL (2015-07-02) |
+----------------------------+------------------------------+------------------------+
| 10.y.z | Juno | no - EOL (2015-12-07) |
+----------------------------+------------------------------+------------------------+
| 11.y.z | Kilo | no - EOL (2016-05-02) |
+----------------------------+------------------------------+------------------------+
| 12.y.z | Liberty | no - EOL (2016-11-17) |
+----------------------------+------------------------------+------------------------+
| 13.y.z | Mitaka | no - EOL (2017-04-10) |
+----------------------------+------------------------------+------------------------+
| 14.y.z | Newton | no - EOL (2017-10-11) |
+----------------------------+------------------------------+------------------------+
| 15.y.z | Ocata | no - EOL (2019-12-12) |
+----------------------------+------------------------------+------------------------+
| 16.y.z | Pike | no - EOL (2019-12-12) |
+----------------------------+------------------------------+------------------------+
| 17.y.z | Queens | yes |
+----------------------------+------------------------------+------------------------+
| 18.y.z | Rocky | yes |
+----------------------------+------------------------------+------------------------+
| 19.y.z | Stein | yes |
+----------------------------+------------------------------+------------------------+
| 20.y.z | Train | yes (current master) |
+----------------------------+------------------------------+------------------------+
| 21.y.z | Ussuri | Future |
+----------------------------+------------------------------+------------------------+
| 22.y.z | Victoria | Future |
+----------------------------+------------------------------+------------------------+
| 23.y.z | Wallaby | Future |
+----------------------------+------------------------------+------------------------+
| 24.y.z | Xena | Future |
+----------------------------+------------------------------+------------------------+
| 25.y.z | Yoga | Future |
+----------------------------+------------------------------+------------------------+
Supermarket releases
====================
- From Ocata on, the cookbooks are released on the Chef Supermarket_.
.. _Supermarket: https://supermarket.chef.io/users/openstack
How to release Chef cookbooks
=============================
- A core member will create the new branch based on the desired SHA.
Example: https://review.openstack.org/#/admin/projects/openstack/cookbook-openstack-compute,branches
- For all cookbooks to be released: update .gitreview and Berksfile
to stable/<release>
Example: https://review.openstack.org/547505
- Create a review with the above and propose it against the stable/<release> branch.
- Solicit for reviews and approval.

View File

@ -1,22 +0,0 @@
===============
Getting Started
===============
Learn about Chef OpenStack
==========================
* To learn about the Chef automation framework,
consult the `online documentation <https://docs.chef.io>`_ or
`Learn Chef <https://learn.chef.io>`_.
* `Supported Platforms <supported-platforms.html>`_
* `Create <quickstart.html>`_ on your own development OpenStack Cloud with `Test Kitchen <https://kitchen.ci>`_.
* `Deploy <deploy.html>`_ on your own physical or virtual machines.
* Chef OpenStack `genealogy <genealogy.html>`_.
.. toctree::
:maxdepth: 1
:hidden:
supported-platforms
quickstart
genealogy

View File

@ -1,14 +0,0 @@
=========================
OpenStack Chef deployment
=========================
.. toctree::
:maxdepth: 2
:includehidden:
get_started.rst
quickstart
deploy
genealogy
supported-platforms

View File

@ -1,102 +0,0 @@
.. _quickstart-test-kitchen:
`Kitchen`_ is a no-fuss, no BS way to get a Chef OpenStack build for:
* development of OpenStack or applications on top of it
* a reference for how the services fit together
* a simple lab environment
.. _Kitchen: https://kitchen.ci/
Kitchen builds are not recommended for production deployments, but they can work in
a pinch when you just need OpenStack.
At an absolute minimum, you should use the following resources. What is listed
is currently used in CI for the gate checks, as well as the tested minimum:
* 8 vCPU (tests as low as 4, but it tends to get CPU bound)
* 8 GB RAM (7 GB sort of works, but it's tight - expect OOM/slowness)
* 50 GB free disk space on the root partition
Recommended server resources:
* CPU/motherboard that supports `hardware-assisted virtualization`_
* 8 CPU cores
* 16 GB RAM
* 80 GB free disk space on the root partition, or 50+ GB on a blank secondary volume.
It is `possible` to perform builds within a virtual machine for
demonstration and evaluation, but your virtual machines will perform poorly.
For production workloads, multiple nodes for specific roles are recommended.
.. _hardware-assisted virtualization: https://en.wikipedia.org/wiki/Hardware-assisted_virtualization
Testing with Kitchen
--------------------
There are three basic steps to building OpenStack with Test Kitchen, with an optional first step should you need to customize your build:
* Configuration *(this step is optional)*
* Install and bootstrap the Chef Development Kit
* Run Test Kitchen
When building on a new server, it is recommended that all system
packages are updated and then rebooted into the new kernel:
.. note:: Execute the following commands and scripts as the root user.
.. code-block:: shell-session
## Ubuntu
# apt-get update
# apt-get dist-upgrade
# reboot
.. code-block:: shell-session
## CentOS
# yum upgrade
# reboot
Start by cloning the OpenStack Chef repository and changing into the root directory:
.. code-block:: shell-session
# git clone https://opendev.org/openstack/openstack-chef \
/opt/openstack-chef
# cd /opt/openstack-chef
Next, switch to the applicable branch/tag to be deployed. Note that deploying
from the head of a branch may result in an unstable build due to changes in
flight and upstream OpenStack changes. For a test (not a development) build, it
is usually best to checkout the latest tagged version.
.. code-block:: shell-session
## List all existing branches.
# git branch -av
## Checkout some stable branch
# git checkout stable/queens
.. note::
The current master release is compatible with Ubuntu 18.04
(Bionic Beaver) and CentOS 7
By default the cookbooks deploy all OpenStack services with sensible defaults
for the purpose of a gate check, development or testing system.
Deployers have the option to change how the build is configured by overriding
in the respective kitchen YAML file. This can be useful when you want to make
use of different services or test new cookbooks.
To use a different driver for Test Kitchen, such as for a multi-node
development environment, pass the ``KITCHEN_YAML`` environment variable as an
additional option to the ``kitchen`` command. For example, if you want to
deploy a containerized development environment, instead of a Vagrant AIO, then execute:
.. code-block:: shell-session
# KITCHEN_YAML=.kitchen.dokken.yml kitchen verify [centos|ubuntu|all]

View File

@ -1,17 +0,0 @@
Supported Platforms
===================
The following operating systems and versions are supported by the OpenStack cookbooks:
* Ubuntu 18.04 LTS (Bionic Beaver)
* CentOS 7 or Stream 8
The cookbooks are tested and verified to work on the Chef stable track using
the `Chef Workstation <https://www.chef.sh/docs/chef-workstation/getting-started/>`_.
Your success rate may vary with the bleeding edge. Chef 15 and older are NOT
supported.
Have a look at
`OpenStack Chef Continuous Integration <https://docs.openstack.org/openstack-chef/latest/contributor/ci.html>`_
to see what is currently tested.

View File

@ -1,8 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# this is required for the docs build jobs
sphinx>=2.0.0,!=2.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
reno>=3.1.0 # Apache-2.0

View File

@ -1,169 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config.html
# -- Chef OpenStack configuration --------------------------------------------
# Short identifiers reused below when composing the output options
# (htmlhelp_basename, latex/man/texinfo/pdf document tuples).
target_name = 'openstack-chef'
description = 'Chef OpenStack uses Chef to deploy OpenStack environments.'
# NOTE(review): the two series names below are not referenced anywhere in
# this file; presumably consumed by templates or theme tooling -- confirm
# before removing.
previous_series_name = 'stein'
current_series_name = 'train'
# -- Project information -----------------------------------------------------
project = u'Chef OpenStack'
title = u'Chef OpenStack Documentation'
category = 'Miscellaneous'
copyright = u'2012-2018, Chef OpenStack Contributors'
author = u'Chef OpenStack Contributors'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'openstackdocstheme',
    'sphinx.ext.autodoc',
    'sphinx.ext.extlinks',
    'sphinx.ext.viewcode'
]
# NOTE(review): 'todo_include_docs' is not a standard Sphinx option
# (compare todo_include_todos below) -- possibly a typo; confirm.
todo_include_docs = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/openstack-chef'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'openstack-chef'
openstackdocs_bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None; 'en' is the modern
# equivalent should this ever be revived.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'openstackdocs'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# If true, publish source files
html_copy_source = False
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, target_name + '.tex',
     title, author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, target_name,
     title, [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, target_name,
     title, author, openstackdocs_bug_project,
     description, category),
]
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
    (master_doc, target_name,
     title, author)
]

View File

@ -1,26 +0,0 @@
.. _ci:
######################
Continuous Integration
######################
This is a list of the CI jobs that are running against most of the Chef
OpenStack cookbooks. The code that configures Zuul jobs is hosted in
`openstack-chef <https://opendev.org/cgit/openstack/openstack-chef/tree/playbooks/>`_.
.. list-table:: **CI Jobs in Chef OpenStack**
:widths: 31 25 8 55
:header-rows: 1
* - Job name
- Description
- Voting
- If it fails
* - openstack-chef-rake
- It ensures the code follows the `Chef style guidelines <https://docs.chef.io/ruby.html>`_.
- Yes
- Read the build logs to see which part of the code does not follow the recommended patterns.
* - openstack-chef-integration
- Functional testing job that converges OpenStack, testing using Tempest.
- Yes
- Read the build logs to see where the failure originated.

View File

@ -1,39 +0,0 @@
=========
Community
=========
Governance
==========
OpenStack Chef governance is well defined and `documented`_. It documents:
* The current Project Team Lead
* The mission statement
* The repositories managed by the group
PTL duty
========
While the official OpenStack PTL duty `guide`_ is general, the OpenStack
Chef PTL is usually in charge of:
* continuity of the project. This can include reviewing or writing code.
* OpenStack cross-project liaison.
* meeting organization.
Core reviewers team
===================
OpenStack Chef has a `core reviewers`_ team, that can merge any code in our
repositories.
Release management
==================
Releases are currently managed by the release `subteam`_. This is sometimes a
PTL task. This includes updating Supermarket.
.. _documented: https://governance.openstack.org/reference/projects/openstack-chef.html
.. _core reviewers: https://review.opendev.org/#/admin/groups/1260,members
.. _subteam: https://review.opendev.org/#/admin/groups/1261,members
.. _guide: https://docs.openstack.org/project-team-guide/ptl.html

View File

@ -1,37 +0,0 @@
=============
Cookbook List
=============
Each Chef OpenStack cookbook corresponds to an OpenStack component and has its
own git repository. The cookbooks produce a number of deliverables, in order to
achieve a clearly stated objective: deploy OpenStack.
OpenStack Cookbooks
===================
* `Bare Metal <https://opendev.org/cgit/openstack/cookbook-openstack-bare-metal/>`_ (*Ironic*)
* `Block Storage <https://opendev.org/cgit/openstack/cookbook-openstack-block-storage/>`_ (*Cinder*)
* `Compute <https://opendev.org/cgit/openstack/cookbook-openstack-compute/>`_ (*Nova*)
* `Dashboard <https://opendev.org/cgit/openstack/cookbook-openstack-dashboard/>`_ (*Horizon*)
* `DNS <https://opendev.org/cgit/openstack/cookbook-openstack-dns/>`_ (*Designate*)
* `Image <https://opendev.org/cgit/openstack/cookbook-openstack-image/>`_ (*Glance*)
* `Network <https://opendev.org/cgit/openstack/cookbook-openstack-network/>`_ (*Neutron*)
* `Orchestration <https://opendev.org/cgit/openstack/cookbook-openstack-orchestration/>`_ (*Heat*)
* `Telemetry <https://opendev.org/cgit/openstack/cookbook-openstack-telemetry/>`_ (*Ceilometer*/*Gnocchi*)
Operations Cookbooks
====================
* `Database <https://opendev.org/cgit/openstack/cookbook-openstack-ops-database/>`_ (*MariaDB*/*MySQL*)
* `Messaging <https://opendev.org/cgit/openstack/cookbook-openstack-ops-messaging/>`_ (*RabbitMQ*)
Supporting Repositories
=======================
* `Client <https://opendev.org/cgit/openstack/cookbook-openstackclient>`_ (LWRPs for using fog-openstack inside Chef recipes)
* `Integration Test <https://opendev.org/cgit/openstack/cookbook-openstack-integration-test/>`_ (Build cookbook to deploy and test using Tempest)
Unmaintained Cookbooks
==============================
* `Object Storage <https://opendev.org/cgit/openstack/cookbook-openstack-object-storage/>`_ (*Swift*)
* `Data Processing <https://opendev.org/cgit/openstack/cookbook-openstack-data-processing/>`_ (*Sahara*)
* `Application Catalog <https://opendev.org/cgit/openstack/cookbook-openstack-application-catalog/>`_ (*Murano*)
* `Database as a Service <https://opendev.org/cgit/openstack/cookbook-openstack-database/>`_ (*Trove*)

View File

@ -1,143 +0,0 @@
Steps to create a stable release branch
=======================================
Awesome! We've decided as a group to create the next stable branch. Here
are some steps to remind you on how to do it.
#. Go to `each repo`_ as a core member and create the branch with the
SHA you want, usually you will just branch from master.::
git checkout master
git pull
git checkout -b stable/<release>
git push gerrit stable/<release>
#. Changes for each cookbook and repo, create a bug to tie all the
following branch work together
a. Update ``.gitreview`` to include ``defaultbranch=stable/<release>``
b. Update ``Berksfile`` to reference ``branch: 'stable/<release>'`` for each branched cookbook
c. See https://review.opendev.org/729795 for an example
#. Create a review with the above and put it up against the ``stable/<release>`` branch.
#. Get it merged in and you should be good
.. _each repo: https://governance.openstack.org/tc/reference/projects/openstack-chef.html
If you think doing this manually for all the cookbooks is a lot of work,
these commands might help you automating it (please CHECK the git diff
before you actually push something):
#. First pull all the cookbooks into one folder and then try to run
these commands one by one from the root folder (they are
intentionally separated, since they will create some changes that you
do not want to push).
.. code-block:: bash
for i in -bare-metal -block-storage client -common -compute \
-dashboard -dns -identity -image -integration-test -network \
-ops-database -ops-messaging -orchestration -telemetry ; do
git clone https://opendev.org/openstack/cookbook-openstack${i}
done
#. Check your ``sed`` version and make sure you have at least version
4.2.1 (if you are on OS X you have to install ``gnu-sed`` via
Homebrew since the one installed works in mysterious ways).
.. code-block:: bash
export RELEASE=train
for i in $(ls | grep cookbook) ; do
cd $i
git checkout -b stable/${RELEASE}
sed -i "/opendev/a\ \ branch: 'stable\/${RELEASE}'" Berksfile
sed -i 's/opendev.*$/&,/' Berksfile
echo "defaultbranch=stable/${RELEASE}" >> .gitreview
cd ..
done
# The next one is important, since there are changes that are wrong
# and should be corrected manually (like adding the branch:
# stable/train for a non-openstack cookbook)
for i in $(ls | grep cookbook) ; do cd $i; git diff; cd .. ; done | less
# After you checked all your changes, you can go ahead, commit it and
# push it up for review.
for i in $(ls | grep cookbook) ; do
cd $i
git review -s
git commit -am "stable/${RELEASE} release patch"
git review
cd ..
done
Steps for a new master branch
-----------------------------
.. note::
These steps are also useful when making global changes that are
dependent on each other.
Now we have a new master, need to get it in sync with matching base
OpenStack release.
#. Possible infra changes for changes to the gates we want for this
release.
#. Decide on new levels of tools (Chef Workstation, Cookstyle, upstream
cookbooks), we have always be trying to move forward with these.
#. Changes for each cookbook and repo:
   a. Update metadata with new major version level
   b. Run ``cookstyle -a`` to fix any style issues. Run Cookstyle again
      and fix any issues that couldn't be fixed automatically.
   c. Update code with refs to old OpenStack release, i.e. "ocata" ->
      "pike" (Common release and yum attributes, ...).
   d. Update all code looking for deprecations that can now be removed.
   e. Update any package dependencies that have changed for each
      component.
   f. Update all spec test platforms to targeted levels we want for this
      release.
It will likely be necessary to disable integration jobs from being
voting on the ``openstack-chef`` repo in order to allow to merge all
these changes. If you do so, make sure that you have one patch at the
end which depends on all others, this one should be passing all
integration jobs again before you merge anything. See this `topic`_ as
an example.
.. _topic: https://review.opendev.org/#/q/topic:train-updates+(status:open+OR+status:merged)
You will want to do this in the following order and add ``Depends-On:``
to each review for its dependencies. Everything should depend on the
openstack-chef repo since that's where all of the tests reside and will
need to be updated. To simplify, you can chain dependencies based on
their ``metadata.rb`` dependencies. See below on specifics:
#. openstack-chef Repo
#. Common (depends on openstack-chef)
#. Client (depends on openstack-chef and Common)
#. Ops-Messaging (depends on openstack-chef)
#. Ops-Database (depends on openstack-chef)
#. Identity (depends on Client, Ops-Messaging and Ops-Database)
#. Image (depends on Identity)
#. Block-Storage (depends on Image)
#. Network (depends on Identity)
#. Compute (depends on Image and Network)
#. Dns (depends on Network)
#. Bare Metal (depends on Image and Network)
#. Orchestration (depends on Identity)
#. Telemetry (depends on Identity)
#. Dashboard (depends on Identity)
#. Integration-Test (depends on Image and Dns)

View File

@ -1,9 +0,0 @@
======================
How to submit a change
======================
Do you want to submit a change to OpenStack?
Review the `Developer's Guide`_.
.. _Developer's Guide: https://docs.openstack.org/infra/manual/developers.html

View File

@ -1,13 +0,0 @@
Contributor Guide
=================
.. toctree::
:maxdepth: 2
:includehidden:
cookbook-list
how-to-contribute
community
talk-to-us
ci
create-stable-branch

View File

@ -1,12 +0,0 @@
.. _mailing_list:
============
Mailing list
============
The mailing list is preferred, as it makes the information more readily available so that others who have the same question or issue can search for and find the answers.
All our communications should be prefixed with **[chef]** in the `mailing list`_.
.. _mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss

View File

@ -1,32 +0,0 @@
.. _meetings:
########
Meetings
########
1. `General Availability`_
2. `IRC Meetings`_
General Availability
====================
The Chef OpenStack team is distributed around the world, but comes together on
IRC_ between the hours of 14:00 UTC and 17:00 UTC.
.. _IRC: talk-to-us.html
IRC Meetings
============
We hold public meetings as needed on ``#openstack-chef`` on OFTC. If a
meeting needs to be held, it will be announced on the openstack-discuss mailing
list.
.. list-table::
:widths: 30 60
:header-rows: 1
* - Meeting Time
- Local Time
* - UTC 1500 Mondays as needed
- https://www.timeanddate.com/worldclock/converter.html

View File

@ -1,14 +0,0 @@
==========
Talk to us
==========
* Find us on `OFTC <https://oftc.net/>`_. Join the ``#openstack-chef`` channel.
* Join the conversation on the `mailing lists <mailing-list.html>`_.
* Participate in or propose `an IRC meeting <meetings.html>`_.
.. toctree::
:maxdepth: 1
:hidden:
mailing-list
meetings

View File

@ -1,17 +0,0 @@
OpenStack Chef Documentation
============================
The Chef cookbooks for OpenStack automate the building, operation and
consumption of OpenStack cloud deployments.
Contributor Guide
-----------------
.. toctree::
:maxdepth: 2
:includehidden:
contributor/index
The latest deployment guide can be found
`here <https://docs.openstack.org/project-deploy-guide/openstack-chef/latest/>`_.

View File

@ -1,53 +0,0 @@
{
"name": "allinone",
"description": "Environment used in testing the upstream cookbooks and reference Chef repository with vagrant. To be used with the vagrantfile-allinone vagrantfile. Defines the necessary attributes for a working all-in-one openstack deployment, using neutron for the networking component, and the openvswitch neutron plugin",
"default_attributes": {
"apache": {
"listen": [
]
}
},
"override_attributes": {
"openstack": {
"is_release": true,
"apt": {
        "update_apt_cache": true
},
"telemetry": {
"conf": {
"DEFAULT": {
"meter_dispatchers": "database"
}
}
},
"dashboard": {
"server_hostname": "localhost"
},
"memcached_servers": [
"127.0.0.1:11211"
],
"mq": {
"user": "admin"
},
"network": {
"conf": {
"DEFAULT": {
"service_plugins": "router"
}
}
},
"image": {
"image_upload": true
},
"compute": {
"conf": {
"libvirt": {
"cpu_type": "none",
"virt_type": "qemu"
}
}
}
}
}
}

View File

@ -1,52 +0,0 @@
{
"name": "integration",
"description": "Chef environment file for building OpenStack in CI settings. Certain Tempest services are disabled in this scenario",
"default_attributes": {
"apache": {
"listen": [
]
},
"openstack": {
"network": {
"conf": {
"DEFAULT": {
"service_plugins": "router"
}
}
}
}
},
"override_attributes": {
"openstack": {
"is_release": true,
"telemetry": {
"conf": {
"DEFAULT": {
"meter_dispatchers": "database"
}
}
},
"dashboard": {
"server_hostname": "localhost"
},
"memcached_servers": [
"127.0.0.1:11211"
],
"mq": {
"user": "admin"
},
"image": {
"image_upload": true
},
"compute": {
"conf": {
"libvirt": {
"cpu_type": "none",
"virt_type": "qemu"
}
}
}
}
}
}

View File

@ -1,195 +0,0 @@
{
"name": "multinode",
"default_attributes": {
"apache": {
"listen": [
]
}
},
"override_attributes": {
"openstack": {
"is_release": true,
"apt": {
"update_apt_cache": true
},
"telemetry": {
"conf": {
"DEFAULT": {
"meter_dispatchers": "database"
}
}
},
"integration-test": {
"conf": {
"service_available": {
"ceilometer": false,
"heat": false,
"horizon": false
}
}
},
"endpoints": {
"db": {
"host": "192.168.101.60"
},
"mq": {
"host": "192.168.101.60"
},
"internal": {
"bare_metal": {
"host": "192.168.101.60"
},
"identity": {
"host": "192.168.101.60"
},
"network": {
"host": "192.168.101.60"
},
"image_api": {
"host": "192.168.101.60"
},
"block-storage": {
"host": "192.168.101.60"
},
"compute-api": {
"host": "192.168.101.60"
},
"compute-metadata-api": {
"host": "192.168.101.60"
},
"compute-novnc": {
"host": "192.168.101.60"
},
"orchestration-api": {
"host": "192.168.101.60"
},
"orchestration-api-cfn": {
"host": "192.168.101.60"
},
"placement-api": {
"host": "192.168.101.60"
}
},
"public": {
"bare_metal": {
"host": "192.168.101.60"
},
"identity": {
"host": "192.168.101.60"
},
"network": {
"host": "192.168.101.60"
},
"image_api": {
"host": "192.168.101.60"
},
"block-storage": {
"host": "192.168.101.60"
},
"compute-api": {
"host": "192.168.101.60"
},
"compute-metadata-api": {
"host": "192.168.101.60"
},
"compute-novnc": {
"host": "192.168.101.60"
},
"orchestration-api": {
"host": "192.168.101.60"
},
"orchestration-api-cfn": {
"host": "192.168.101.60"
},
"placement-api": {
"host": "192.168.101.60"
}
}
},
"bind_service": {
"db": {
"host": "192.168.101.60"
},
"mq": {
"host": "192.168.101.60"
},
"public": {
"identity": {
"host": "0.0.0.0"
}
},
"internal": {
"identity": {
"host": "0.0.0.0"
}
},
"all": {
"bare_metal": {
"host": "0.0.0.0"
},
"network": {
"host": "0.0.0.0"
},
"image_api": {
"host": "0.0.0.0"
},
"block-storage": {
"host": "0.0.0.0"
},
"compute-api": {
"host": "0.0.0.0"
},
"compute-metadata-api": {
"host": "0.0.0.0"
},
"compute-novnc": {
"host": "0.0.0.0"
},
"orchestration-api": {
"host": "0.0.0.0"
},
"orchestration-api-cfn": {
"host": "0.0.0.0"
},
"placement-api": {
"host": "0.0.0.0"
}
}
},
"dashboard": {
"server_hostname": "controller.example.net"
},
"memcached_servers": [
"192.168.101.60:11211"
],
"mq": {
"user": "admin"
},
"network": {
"conf": {
"DEFAULT": {
"service_plugins": "router"
},
"transport_url": {
"rabbit_host": "192.168.101.60"
}
}
},
"image": {
"image_upload": true
},
"compute": {
"conf": {
"libvirt": {
"cpu_type": "none",
"virt_type": "qemu"
},
"transport_url": {
"rabbit_host": "192.168.101.60"
}
}
}
}
}
}

View File

@ -1,142 +0,0 @@
# Test Kitchen configuration for the OpenStack Chef repository
# (Vagrant driver; one suite per cookbook role plus an all-in-one default).
# ERB preamble: IP assigned to the Vagrant private network below.
<%
public_ip = "10.10.0.81"
%>
---
driver:
  name: vagrant
  customize:
    # VM sizing used for a full OpenStack converge.
    cpus: 4
    memory: 8192
  network:
    # Expose the guest's HTTPS dashboard on host port 9443.
    - ["forwarded_port", {guest: 443, host: 9443, auto_correct: true}]
    - ["private_network", {ip: <%= public_ip %>}]
provisioner:
  name: chef_zero
  # You may wish to disable always updating cookbooks in CI or other testing
  # environments.
  # For example:
  # always_update_cookbooks: <%= !ENV['CI'] %>
  always_update_cookbooks: true
  product_name: <%= ENV['CHEF_PRODUCT_NAME'] || 'chef' %>
  product_version: 17
  deprecations_as_errors: true
  # Converge twice so non-idempotent resources surface as failures.
  multiple_converge: 2
  # Copy secret to /tmp/kitchen on test VM. Kitchen tries to gather secrets
  # before any recipes had a chance to run -> we cannot use a recipe to put the
  # secrets file in place.
  encrypted_data_bag_secret_key_path: .chef/encrypted_data_bag_secret
  roles_path: roles
  environments_path: environments
  attributes:
    openstack:
      secret:
        key_path: /tmp/kitchen/encrypted_data_bag_secret
  client_rb:
    environment: integration
    treat_deprecation_warnings_as_errors: true
    resource_cloning: false
    chef_license: accept
verifier:
  name: inspec
platforms:
  - name: ubuntu-18.04
    driver:
      box: bento/ubuntu-18.04
  - name: centos-7
    driver:
      box: bento/centos-7
  - name: centos-stream-8
    driver:
      box: bento/centos-stream-8
suites:
  # Each suite converges the openstack_test fixture cookbook plus one role;
  # the matching *_test roles run the verification recipes.
  - name: default
    run_list:
      - recipe[openstack_test]
      - role[allinone]
      - role[minimal_test]
    provisioner:
      # The all-in-one suite uses its own Chef environment.
      client_rb:
        environment: allinone
  - name: minimal
    run_list:
      - recipe[openstack_test]
      - role[minimal]
      - role[minimal_test]
  - name: common
    run_list:
      - recipe[openstack_test]
      - role[common]
      - role[common_test]
  - name: openstackclient
    run_list:
      - recipe[openstack_test]
      - role[openstackclient]
      - recipe[openstack_test::openstackclient]
  - name: ops-database
    run_list:
      - recipe[openstack_test]
      - role[ops_database]
  - name: ops-messaging
    run_list:
      - recipe[openstack_test]
      - role[ops_messaging]
  - name: identity
    run_list:
      - recipe[openstack_test]
      - role[identity]
      - role[identity_test]
  - name: image
    run_list:
      - recipe[openstack_test]
      - role[image]
      - role[image_test]
  - name: network
    run_list:
      - recipe[openstack_test]
      - role[identity]
      - role[network]
      - role[network_test]
  - name: compute
    run_list:
      - recipe[openstack_test]
      - role[compute]
      - role[compute_test]
  - name: orchestration
    run_list:
      - recipe[openstack_test]
      - role[orchestration]
      - role[orchestration_test]
  - name: block-storage
    run_list:
      - recipe[openstack_test]
      - role[block_storage]
      - role[block_storage_test]
  - name: bare-metal
    run_list:
      - recipe[openstack_test]
      - role[bare_metal]
      - role[bare_metal_test]
  - name: telemetry
    run_list:
      - recipe[openstack_test]
      - role[telemetry]
      - role[telemetry_test]
  - name: dns
    run_list:
      - recipe[openstack_test]
      - role[dns]
      - role[dns_test]
  - name: dashboard
    run_list:
      - recipe[openstack_test]
      - role[dashboard]
      - role[dashboard_test]
  - name: integration
    run_list:
      - recipe[openstack_test]
      - role[integration]
      - role[integration_test]
View File

@ -1,8 +0,0 @@
- hosts: all
roles:
- revoke-sudo
tasks:
- name: run delivery local
shell:
cmd: chef exec delivery local all
chdir: '{{ zuul.project.src_dir }}'

View File

@ -1,10 +0,0 @@
- hosts: all
tasks:
- name: run integration
shell:
cmd: /opt/chef/embedded/bin/rake integration
chdir: '{{ ansible_user_dir }}/src/opendev.org/openstack/openstack-chef'
environment:
WORKSPACE: '{{ ansible_user_dir }}'
PROJECT_DIR: '{{ ansible_user_dir }}/{{ zuul.project.src_dir }}'
PROJECT_NAME: '{{ zuul.project.short_name }}'

View File

@ -1,11 +0,0 @@
- hosts: all
tasks:
- name: run integration
shell:
cmd: /opt/chef/embedded/bin/rake integration
chdir: '{{ ansible_user_dir }}/src/opendev.org/openstack/openstack-chef'
environment:
CHEF_MINIMAL: 'yes'
WORKSPACE: '{{ ansible_user_dir }}'
PROJECT_DIR: '{{ ansible_user_dir }}/{{ zuul.project.src_dir }}'
PROJECT_NAME: '{{ zuul.project.short_name }}'

View File

@ -1,7 +0,0 @@
- hosts: all
tasks:
- name: Collect logs
synchronize:
dest: '{{ zuul.executor.log_root }}/{{ inventory_hostname }}'
mode: pull
src: '{{ ansible_user_dir }}/logs/'

View File

@ -1,21 +0,0 @@
- hosts: all
roles:
- bindep
vars:
bindep_file: /home/zuul/src/opendev.org/openstack/openstack-chef/bindep.txt
- hosts: all
vars:
release: 21.10.640
chef_workstation: chef-workstation_{{ release }}-1_amd64.deb
tasks:
- name: Fetch chef-workstation package
get_url:
dest: /tmp/{{ chef_workstation }}
url: https://packages.chef.io/files/stable/chef-workstation/{{ release }}/ubuntu/18.04/{{ chef_workstation }}
- name: Install chef-workstation package
shell: dpkg -i /tmp/{{ chef_workstation }}
become: yes
- name: Show chef environment, accept chef license as side effect
shell: chef env --chef-license accept
become: yes

View File

@ -1,6 +0,0 @@
- hosts: all
roles:
- bindep
- install-chef
vars:
bindep_file: /home/zuul/src/opendev.org/openstack/openstack-chef/bindep.txt

View File

@ -1,8 +0,0 @@
- hosts: all
roles:
- revoke-sudo
tasks:
- name: run rake tests
shell:
cmd: chef exec rake
chdir: '{{ zuul.project.src_dir }}'

View File

@ -1,8 +0,0 @@
Install a chef-client implementation
**Role Variables**
.. zuul:rolevar:: openstack_chef_client_type
:default: chef
The type of client to install, can be either ``chef`` or ``cinc``.

View File

@ -1,3 +0,0 @@
openstack_chef_client_type: chef
openstack_chef_client_version: 17
openstack_selinux_setenforce: 0

View File

@ -1,14 +0,0 @@
chef-install.sh
===============
`Chef Omnitruck installation script`_ which is used directly by Zuul to
install the chef-client for the integration jobs. Taken from
https://omnitruck.chef.io/install.sh_.
.. _Chef Omnitruck installation script: https://docs.chef.io/install_omnibus.html
cinc-install.sh
===============
Cinc Omnitruck installation script, taken from
https://omnitruck.cinc.sh/install.sh_.

View File

@ -1,790 +0,0 @@
#!/bin/sh
# WARNING: REQUIRES /bin/sh
#
# - must run on /bin/sh on solaris 9
# - must run on /bin/sh on AIX 6.x
#
# Copyright:: Copyright (c) 2010-2015 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# helpers.sh
############
# This section has some helper functions to make life easier.
#
# Outputs:
# $tmp_dir: secure-ish temp directory that can be used during installation.
############
# Check whether a command exists - returns 0 if it does, 1 if it does not
exists() {
if command -v $1 >/dev/null 2>&1
then
return 0
else
return 1
fi
}
# Output the instructions to report bug about this script
report_bug() {
echo "Version: $version"
echo ""
echo "Please file a Bug Report at https://github.com/chef/omnitruck/issues/new"
echo "Alternatively, feel free to open a Support Ticket at https://www.chef.io/support/tickets"
echo "More Chef support resources can be found at https://www.chef.io/support"
echo ""
echo "Please include as many details about the problem as possible i.e., how to reproduce"
echo "the problem (if possible), type of the Operating System and its version, etc.,"
echo "and any other relevant details that might help us with troubleshooting."
echo ""
}
checksum_mismatch() {
echo "Package checksum mismatch!"
report_bug
exit 1
}
unable_to_retrieve_package() {
echo "Unable to retrieve a valid package!"
report_bug
echo "Metadata URL: $metadata_url"
if test "x$download_url" != "x"; then
echo "Download URL: $download_url"
fi
if test "x$stderr_results" != "x"; then
echo "\nDEBUG OUTPUT FOLLOWS:\n$stderr_results"
fi
exit 1
}
http_404_error() {
echo "Omnitruck artifact does not exist for version $version on platform $platform"
echo ""
echo "Either this means:"
echo " - We do not support $platform"
echo " - We do not have an artifact for $version"
echo ""
echo "This is often the latter case due to running a prerelease or RC version of chef"
echo "or a gem version which was only pushed to rubygems and not omnitruck."
echo ""
echo "You may be able to set your knife[:bootstrap_version] to the most recent stable"
echo "release of Chef to fix this problem (or the most recent stable major version number)."
echo ""
echo "In order to test the version parameter, adventurous users may take the Metadata URL"
echo "below and modify the '&v=<number>' parameter until you successfully get a URL that"
echo "does not 404 (e.g. via curl or wget). You should be able to use '&v=11' or '&v=12'"
echo "succesfully."
echo ""
echo "If you cannot fix this problem by setting the bootstrap_version, it probably means"
echo "that $platform is not supported."
echo ""
# deliberately do not call report_bug to suppress bug report noise.
echo "Metadata URL: $metadata_url"
if test "x$download_url" != "x"; then
echo "Download URL: $download_url"
fi
if test "x$stderr_results" != "x"; then
echo "\nDEBUG OUTPUT FOLLOWS:\n$stderr_results"
fi
exit 1
}
capture_tmp_stderr() {
# spool up /tmp/stderr from all the commands we called
if test -f "$tmp_dir/stderr"; then
output=`cat $tmp_dir/stderr`
stderr_results="${stderr_results}\nSTDERR from $1:\n\n$output\n"
rm $tmp_dir/stderr
fi
}
# do_wget URL FILENAME
do_wget() {
echo "trying wget..."
wget --user-agent="User-Agent: mixlib-install/3.11.5" -O "$2" "$1" 2>$tmp_dir/stderr
rc=$?
# check for 404
grep "ERROR 404" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "wget"
return 1
fi
return 0
}
# do_curl URL FILENAME
do_curl() {
echo "trying curl..."
curl -A "User-Agent: mixlib-install/3.11.5" --retry 5 -sL -D $tmp_dir/stderr "$1" > "$2"
rc=$?
# check for 404
grep "404 Not Found" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "curl"
return 1
fi
return 0
}
# do_fetch URL FILENAME
do_fetch() {
echo "trying fetch..."
fetch --user-agent="User-Agent: mixlib-install/3.11.5" -o "$2" "$1" 2>$tmp_dir/stderr
# check for bad return status
test $? -ne 0 && return 1
return 0
}
# do_perl URL FILENAME
do_perl() {
echo "trying perl..."
perl -e 'use LWP::Simple; getprint($ARGV[0]);' "$1" > "$2" 2>$tmp_dir/stderr
rc=$?
# check for 404
grep "404 Not Found" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "perl"
return 1
fi
return 0
}
# do_python URL FILENAME
do_python() {
echo "trying python..."
python -c "import sys,urllib2; sys.stdout.write(urllib2.urlopen(urllib2.Request(sys.argv[1], headers={ 'User-Agent': 'mixlib-install/3.11.5' })).read())" "$1" > "$2" 2>$tmp_dir/stderr
rc=$?
# check for 404
grep "HTTP Error 404" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "python"
return 1
fi
return 0
}
# returns 0 if checksums match
do_checksum() {
if exists sha256sum; then
echo "Comparing checksum with sha256sum..."
checksum=`sha256sum $1 | awk '{ print $1 }'`
return `test "x$checksum" = "x$2"`
elif exists shasum; then
echo "Comparing checksum with shasum..."
checksum=`shasum -a 256 $1 | awk '{ print $1 }'`
return `test "x$checksum" = "x$2"`
else
echo "WARNING: could not find a valid checksum program, pre-install shasum or sha256sum in your O/S image to get valdation..."
return 0
fi
}
# do_download URL FILENAME
do_download() {
echo "downloading $1"
echo " to file $2"
url=`echo $1`
if test "x$platform" = "xsolaris2"; then
if test "x$platform_version" = "x5.9" -o "x$platform_version" = "x5.10"; then
# solaris 9 lacks openssl, solaris 10 lacks recent enough credentials - your base O/S is completely insecure, please upgrade
url=`echo $url | sed -e 's/https/http/'`
fi
fi
# we try all of these until we get success.
# perl, in particular may be present but LWP::Simple may not be installed
if exists wget; then
do_wget $url $2 && return 0
fi
if exists curl; then
do_curl $url $2 && return 0
fi
if exists fetch; then
do_fetch $url $2 && return 0
fi
if exists perl; then
do_perl $url $2 && return 0
fi
if exists python; then
do_python $url $2 && return 0
fi
unable_to_retrieve_package
}
# install_file TYPE FILENAME
# TYPE is "rpm", "deb", "solaris", "sh", etc.
install_file() {
echo "Installing $project $version"
case "$1" in
"rpm")
if test "x$platform" = "xnexus" || test "x$platform" = "xios_xr"; then
echo "installing with yum..."
yum install -yv "$2"
else
echo "installing with rpm..."
rpm -Uvh --oldpackage --replacepkgs "$2"
fi
;;
"deb")
echo "installing with dpkg..."
dpkg -i "$2"
;;
"bff")
echo "installing with installp..."
installp -aXYgd "$2" all
;;
"solaris")
echo "installing with pkgadd..."
echo "conflict=nocheck" > $tmp_dir/nocheck
echo "action=nocheck" >> $tmp_dir/nocheck
echo "mail=" >> $tmp_dir/nocheck
pkgrm -a $tmp_dir/nocheck -n $project >/dev/null 2>&1 || true
pkgadd -G -n -d "$2" -a $tmp_dir/nocheck $project
;;
"pkg")
echo "installing with installer..."
cd / && /usr/sbin/installer -pkg "$2" -target /
;;
"dmg")
echo "installing dmg file..."
hdiutil detach "/Volumes/chef_software" >/dev/null 2>&1 || true
hdiutil attach "$2" -mountpoint "/Volumes/chef_software"
cd / && /usr/sbin/installer -pkg `find "/Volumes/chef_software" -name \*.pkg` -target /
hdiutil detach "/Volumes/chef_software"
;;
"sh" )
echo "installing with sh..."
sh "$2"
;;
"p5p" )
echo "installing p5p package..."
pkg install -g "$2" $project
;;
*)
echo "Unknown filetype: $1"
report_bug
exit 1
;;
esac
if test $? -ne 0; then
echo "Installation failed"
report_bug
exit 1
fi
}
if test "x$TMPDIR" = "x"; then
tmp="/tmp"
else
tmp=$TMPDIR
fi
# secure-ish temp dir creation without having mktemp available (DDoS-able but not expliotable)
tmp_dir="$tmp/install.sh.$$"
(umask 077 && mkdir $tmp_dir) || exit 1
############
# end of helpers.sh
############
# script_cli_parameters.sh
############
# This section reads the CLI parameters for the install script and translates
# them to the local parameters to be used later by the script.
#
# Outputs:
# $version: Requested version to be installed.
# $channel: Channel to install the product from
# $project: Project to be installed
# $cmdline_filename: Name of the package downloaded on local disk.
# $cmdline_dl_dir: Name of the directory downloaded package will be saved to on local disk.
# $install_strategy: Method of package installations. default strategy is to always install upon exec. Set to "once" to skip if project is installed
# $download_url_override: Install package downloaded from a direct URL.
# $checksum: SHA256 for download_url_override file (optional)
############
# Defaults
channel="stable"
project="chef"
while getopts pnv:c:f:P:d:s:l:a opt
do
case "$opt" in
v) version="$OPTARG";;
c) channel="$OPTARG";;
p) channel="current";; # compat for prerelease option
n) channel="current";; # compat for nightlies option
f) cmdline_filename="$OPTARG";;
P) project="$OPTARG";;
d) cmdline_dl_dir="$OPTARG";;
s) install_strategy="$OPTARG";;
l) download_url_override="$OPTARG";;
a) checksum="$OPTARG";;
\?) # unknown flag
echo >&2 \
"usage: $0 [-P project] [-c release_channel] [-v version] [-f filename | -d download_dir] [-s install_strategy] [-l download_url_override] [-a checksum]"
exit 1;;
esac
done
shift `expr $OPTIND - 1`
if test -d "/opt/$project" && test "x$install_strategy" = "xonce"; then
echo "$project installation detected"
echo "install_strategy set to 'once'"
echo "Nothing to install"
exit
fi
# platform_detection.sh
############
# This section makes platform detection compatible with omnitruck on the system
# it runs.
#
# Outputs:
# $platform: Name of the platform.
# $platform_version: Version of the platform.
# $machine: System's architecture.
############
#
# Platform and Platform Version detection
#
# NOTE: This should now match ohai platform and platform_version matching.
# do not invented new platform and platform_version schemas, just make this behave
# like what ohai returns as platform and platform_version for the server.
#
# ALSO NOTE: Do not mangle platform or platform_version here. It is less error
# prone and more future-proof to do that in the server, and then all omnitruck clients
# will 'inherit' the changes (install.sh is not the only client of the omnitruck
# endpoint out there).
#
machine=`uname -m`
os=`uname -s`
if test -f "/etc/lsb-release" && grep -q DISTRIB_ID /etc/lsb-release && ! grep -q wrlinux /etc/lsb-release; then
platform=`grep DISTRIB_ID /etc/lsb-release | cut -d "=" -f 2 | tr '[A-Z]' '[a-z]'`
platform_version=`grep DISTRIB_RELEASE /etc/lsb-release | cut -d "=" -f 2`
if test "$platform" = "\"cumulus linux\""; then
platform="cumulus_linux"
elif test "$platform" = "\"cumulus networks\""; then
platform="cumulus_networks"
fi
elif test -f "/etc/debian_version"; then
platform="debian"
platform_version=`cat /etc/debian_version`
elif test -f "/etc/Eos-release"; then
# EOS may also contain /etc/redhat-release so this check must come first.
platform=arista_eos
platform_version=`awk '{print $4}' /etc/Eos-release`
machine="i386"
elif test -f "/etc/redhat-release"; then
platform=`sed 's/^\(.\+\) release.*/\1/' /etc/redhat-release | tr '[A-Z]' '[a-z]'`
platform_version=`sed 's/^.\+ release \([.0-9]\+\).*/\1/' /etc/redhat-release`
if test "$platform" = "xenserver"; then
# Current XenServer 6.2 is based on CentOS 5, platform is not reset to "el" server should hanlde response
platform="xenserver"
else
# FIXME: use "redhat"
platform="el"
fi
elif test -f "/etc/system-release"; then
platform=`sed 's/^\(.\+\) release.\+/\1/' /etc/system-release | tr '[A-Z]' '[a-z]'`
platform_version=`sed 's/^.\+ release \([.0-9]\+\).*/\1/' /etc/system-release | tr '[A-Z]' '[a-z]'`
case $platform in amazon*) # sh compat method of checking for a substring
platform="el"
. /etc/os-release
platform_version=$VERSION_ID
if test "$platform_version" = "2"; then
platform_version="7"
else
# VERSION_ID will match YYYY.MM for Amazon Linux AMIs
platform_version="6"
fi
esac
# Apple OS X
elif test -f "/usr/bin/sw_vers"; then
platform="mac_os_x"
# Matching the tab-space with sed is error-prone
platform_version=`sw_vers | awk '/^ProductVersion:/ { print $2 }' | cut -d. -f1,2`
# x86_64 Apple hardware often runs 32-bit kernels (see OHAI-63)
x86_64=`sysctl -n hw.optional.x86_64`
if test $x86_64 -eq 1; then
machine="x86_64"
fi
elif test -f "/etc/release"; then
machine=`/usr/bin/uname -p`
if grep -q SmartOS /etc/release; then
platform="smartos"
platform_version=`grep ^Image /etc/product | awk '{ print $3 }'`
else
platform="solaris2"
platform_version=`/usr/bin/uname -r`
fi
elif test -f "/etc/SuSE-release"; then
if grep -q 'Enterprise' /etc/SuSE-release;
then
platform="sles"
platform_version=`awk '/^VERSION/ {V = $3}; /^PATCHLEVEL/ {P = $3}; END {print V "." P}' /etc/SuSE-release`
else
platform="suse"
platform_version=`awk '/^VERSION =/ { print $3 }' /etc/SuSE-release`
fi
elif test "x$os" = "xFreeBSD"; then
platform="freebsd"
platform_version=`uname -r | sed 's/-.*//'`
elif test "x$os" = "xAIX"; then
platform="aix"
platform_version="`uname -v`.`uname -r`"
machine="powerpc"
elif test -f "/etc/os-release"; then
. /etc/os-release
if test "x$CISCO_RELEASE_INFO" != "x"; then
. $CISCO_RELEASE_INFO
fi
platform=$ID
platform_version=$VERSION
fi
if test "x$platform" = "x"; then
echo "Unable to determine platform version!"
report_bug
exit 1
fi
#
# NOTE: platform manging in the install.sh is DEPRECATED
#
# - install.sh should be true to ohai and should not remap
# platform or platform versions.
#
# - remapping platform and mangling platform version numbers is
# now the complete responsibility of the server-side endpoints
#
major_version=`echo $platform_version | cut -d. -f1`
case $platform in
# FIXME: should remove this case statement completely
"el")
# FIXME: "el" is deprecated, should use "redhat"
platform_version=$major_version
;;
"debian")
if test "x$major_version" = "x5"; then
# This is here for potential back-compat.
# We do not have 5 in versions we publish for anymore but we
# might have it for earlier versions.
platform_version="6"
else
platform_version=$major_version
fi
;;
"freebsd")
platform_version=$major_version
;;
"sles")
platform_version=$major_version
;;
"suse")
platform_version=$major_version
;;
esac
# normalize the architecture we detected
case $machine in
"x86_64"|"amd64"|"x64")
machine="x86_64"
;;
"i386"|"i86pc"|"x86"|"i686")
machine="i386"
;;
"sparc"|"sun4u"|"sun4v")
machine="sparc"
;;
esac
if test "x$platform_version" = "x"; then
echo "Unable to determine platform version!"
report_bug
exit 1
fi
if test "x$platform" = "xsolaris2"; then
# hack up the path on Solaris to find wget, pkgadd
PATH=/usr/sfw/bin:/usr/sbin:$PATH
export PATH
fi
echo "$platform $platform_version $machine"
############
# end of platform_detection.sh
############
# All of the download utilities in this script load common proxy env vars.
# If variables are set they will override any existing env vars.
# Otherwise, default proxy env vars will be loaded by the respective
# download utility.
if test "x$https_proxy" != "x"; then
echo "setting https_proxy: $https_proxy"
export HTTPS_PROXY=$https_proxy
export https_proxy=$https_proxy
fi
if test "x$http_proxy" != "x"; then
echo "setting http_proxy: $http_proxy"
export HTTP_PROXY=$http_proxy
export http_proxy=$http_proxy
fi
if test "x$ftp_proxy" != "x"; then
echo "setting ftp_proxy: $ftp_proxy"
export FTP_PROXY=$ftp_proxy
export ftp_proxy=$ftp_proxy
fi
if test "x$no_proxy" != "x"; then
echo "setting no_proxy: $no_proxy"
export NO_PROXY=$no_proxy
export no_proxy=$no_proxy
fi
# fetch_metadata.sh
############
# This section calls omnitruck to get the information about the build to be
# installed.
#
# Inputs:
# $channel:
# $project:
# $version:
# $platform:
# $platform_version:
# $machine:
# $tmp_dir:
#
# Outputs:
# $download_url:
# $sha256:
############
if test "x$download_url_override" = "x"; then
echo "Getting information for $project $channel $version for $platform..."
metadata_filename="$tmp_dir/metadata.txt"
metadata_url="https://omnitruck.chef.io/$channel/$project/metadata?v=$version&p=$platform&pv=$platform_version&m=$machine"
do_download "$metadata_url" "$metadata_filename"
cat "$metadata_filename"
echo ""
# check that all the mandatory fields in the downloaded metadata are there
if grep '^url' $metadata_filename > /dev/null && grep '^sha256' $metadata_filename > /dev/null; then
echo "downloaded metadata file looks valid..."
else
echo "downloaded metadata file is corrupted or an uncaught error was encountered in downloading the file..."
# this generally means one of the download methods downloaded a 404 or something like that and then reported a successful exit code,
# and this should be fixed in the function that was doing the download.
report_bug
exit 1
fi
download_url=`awk '$1 == "url" { print $2 }' "$metadata_filename"`
sha256=`awk '$1 == "sha256" { print $2 }' "$metadata_filename"`
else
download_url=$download_url_override
# Set sha256 to empty string if checksum not set
sha256=${checksum=""}
fi
############
# end of fetch_metadata.sh
############
# fetch_package.sh
############
# This section fetchs a package from $download_url and verifies its metadata.
#
# Inputs:
# $download_url:
# $tmp_dir:
# Optional Inputs:
# $cmdline_filename: Name of the package downloaded on local disk.
# $cmdline_dl_dir: Name of the directory downloaded package will be saved to on local disk.
#
# Outputs:
# $download_filename: Name of the downloaded file on local disk.
# $filetype: Type of the file downloaded.
############
filename=`echo $download_url | sed -e 's/^.*\///'`
filetype=`echo $filename | sed -e 's/^.*\.//'`
# use either $tmp_dir, the provided directory (-d) or the provided filename (-f)
if test "x$cmdline_filename" != "x"; then
download_filename="$cmdline_filename"
elif test "x$cmdline_dl_dir" != "x"; then
download_filename="$cmdline_dl_dir/$filename"
else
download_filename="$tmp_dir/$filename"
fi
# ensure the parent directory where to download the installer always exists
download_dir=`dirname $download_filename`
(umask 077 && mkdir -p $download_dir) || exit 1
# check if we have that file locally available and if so verify the checksum
# Use cases
# 1) metadata - new download
# 2) metadata - cached download when cmdline_dl_dir set
# 3) url override - no checksum new download
# 4) url override - with checksum new download
# 5) url override - with checksum cached download when cmdline_dl_dir set
cached_file_available="false"
verify_checksum="true"
if test -f $download_filename; then
echo "$download_filename exists"
cached_file_available="true"
fi
if test "x$download_url_override" != "x"; then
echo "Download URL override specified"
if test "x$cached_file_available" = "xtrue"; then
echo "Verifying local file"
if test "x$sha256" = "x"; then
echo "Checksum not specified, ignoring existing file"
cached_file_available="false" # download new file
verify_checksum="false" # no checksum to compare after download
elif do_checksum "$download_filename" "$sha256"; then
echo "Checksum match, using existing file"
cached_file_available="true" # don't need to download file
verify_checksum="false" # don't need to checksum again
else
echo "Checksum mismatch, ignoring existing file"
cached_file_available="false" # download new file
verify_checksum="true" # checksum new downloaded file
fi
else
echo "$download_filename not found"
cached_file_available="false" # download new file
if test "x$sha256" = "x"; then
verify_checksum="false" # no checksum to compare after download
else
verify_checksum="true" # checksum new downloaded file
fi
fi
fi
if test "x$cached_file_available" != "xtrue"; then
do_download "$download_url" "$download_filename"
fi
if test "x$verify_checksum" = "xtrue"; then
do_checksum "$download_filename" "$sha256" || checksum_mismatch
fi
############
# end of fetch_package.sh
############
# install_package.sh
############
# Installs a package and removed the temp directory.
#
# Inputs:
# $download_filename: Name of the file to be installed.
# $filetype: Type of the file to be installed.
# $version: The version requested. Used only for warning user if not set.
############
if test "x$version" = "x" -a "x$CI" != "xtrue"; then
echo
echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
echo
echo "You are installing an omnibus package without a version pin. If you are installing"
echo "on production servers via an automated process this is DANGEROUS and you will"
echo "be upgraded without warning on new releases, even to new major releases."
echo "Letting the version float is only appropriate in desktop, test, development or"
echo "CI/CD environments."
echo
echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
echo
fi
install_file $filetype "$download_filename"
if test "x$tmp_dir" != "x"; then
rm -r "$tmp_dir"
fi
############
# end of install_package.sh
############

View File

@ -1,798 +0,0 @@
#!/bin/sh
# WARNING: REQUIRES /bin/sh
#
# - must run on /bin/sh on solaris 9
# - must run on /bin/sh on AIX 6.x
#
# Copyright:: Copyright (c) 2010-2018 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# helpers.sh
############
# This section has some helper functions to make life easier.
#
# Outputs:
# $tmp_dir: secure-ish temp directory that can be used during installation.
############
# Check whether a command exists - returns 0 if it does, 1 if it does not
exists() {
if command -v $1 >/dev/null 2>&1
then
return 0
else
return 1
fi
}
# Output the instructions to report bug about this script
report_bug() {
echo "Version: $version"
echo ""
echo "Please file a Bug Report at https://gitlab.com/cinc-project/mixlib-install/issues"
echo "Alternatively, feel free to open a Support Ticket at https://gitlab.com/groups/cinc-project/-/issues"
echo "More Cinc support resources can be found at https://www.cinc.sh/support"
echo ""
echo "Please include as many details about the problem as possible i.e., how to reproduce"
echo "the problem (if possible), type of the Operating System and its version, etc.,"
echo "and any other relevant details that might help us with troubleshooting."
echo ""
}
checksum_mismatch() {
echo "Package checksum mismatch!"
report_bug
exit 1
}
unable_to_retrieve_package() {
echo "Unable to retrieve a valid package!"
report_bug
echo "Metadata URL: $metadata_url"
if test "x$download_url" != "x"; then
echo "Download URL: $download_url"
fi
if test "x$stderr_results" != "x"; then
echo "\nDEBUG OUTPUT FOLLOWS:\n$stderr_results"
fi
exit 1
}
http_404_error() {
echo "Omnitruck artifact does not exist for version $version on platform $platform"
echo ""
echo "Either this means:"
echo " - We do not support $platform"
echo " - We do not have an artifact for $version"
echo ""
echo "This is often the latter case due to running a prerelease or RC version of Cinc"
echo "or a gem version which was only pushed to rubygems and not omnitruck."
echo ""
echo "You may be able to set your knife[:bootstrap_version] to the most recent stable"
echo "release of Cinc to fix this problem (or the most recent stable major version number)."
echo ""
echo "In order to test the version parameter, adventurous users may take the Metadata URL"
echo "below and modify the '&v=<number>' parameter until you successfully get a URL that"
echo "does not 404 (e.g. via curl or wget). You should be able to use '&v=11' or '&v=12'"
echo "succesfully."
echo ""
echo "If you cannot fix this problem by setting the bootstrap_version, it probably means"
echo "that $platform is not supported."
echo ""
# deliberately do not call report_bug to suppress bug report noise.
echo "Metadata URL: $metadata_url"
if test "x$download_url" != "x"; then
echo "Download URL: $download_url"
fi
if test "x$stderr_results" != "x"; then
echo "\nDEBUG OUTPUT FOLLOWS:\n$stderr_results"
fi
exit 1
}
capture_tmp_stderr() {
# spool up /tmp/stderr from all the commands we called
if test -f "$tmp_dir/stderr"; then
output=`cat $tmp_dir/stderr`
stderr_results="${stderr_results}\nSTDERR from $1:\n\n$output\n"
rm $tmp_dir/stderr
fi
}
# do_wget URL FILENAME
do_wget() {
echo "trying wget..."
wget --user-agent="User-Agent: mixlib-install/3.11.27" -O "$2" "$1" 2>$tmp_dir/stderr
rc=$?
# check for 404
grep "ERROR 404" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "wget"
return 1
fi
return 0
}
# do_curl URL FILENAME
do_curl() {
echo "trying curl..."
curl -A "User-Agent: mixlib-install/3.11.27" --retry 5 -sL -D $tmp_dir/stderr "$1" > "$2"
rc=$?
# check for 404
grep "404 Not Found" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "curl"
return 1
fi
return 0
}
# do_fetch URL FILENAME
do_fetch() {
echo "trying fetch..."
fetch --user-agent="User-Agent: mixlib-install/3.11.27" -o "$2" "$1" 2>$tmp_dir/stderr
# check for bad return status
test $? -ne 0 && return 1
return 0
}
# do_perl URL FILENAME
do_perl() {
echo "trying perl..."
perl -e 'use LWP::Simple; getprint($ARGV[0]);' "$1" > "$2" 2>$tmp_dir/stderr
rc=$?
# check for 404
grep "404 Not Found" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "perl"
return 1
fi
return 0
}
# do_python URL FILENAME
do_python() {
echo "trying python..."
python -c "import sys,urllib2; sys.stdout.write(urllib2.urlopen(urllib2.Request(sys.argv[1], headers={ 'User-Agent': 'mixlib-install/3.11.27' })).read())" "$1" > "$2" 2>$tmp_dir/stderr
rc=$?
# check for 404
grep "HTTP Error 404" $tmp_dir/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
http_404_error
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
capture_tmp_stderr "python"
return 1
fi
return 0
}
# returns 0 if checksums match
do_checksum() {
if exists sha256sum; then
echo "Comparing checksum with sha256sum..."
checksum=`sha256sum $1 | awk '{ print $1 }'`
return `test "x$checksum" = "x$2"`
elif exists shasum; then
echo "Comparing checksum with shasum..."
checksum=`shasum -a 256 $1 | awk '{ print $1 }'`
return `test "x$checksum" = "x$2"`
else
echo "WARNING: could not find a valid checksum program, pre-install shasum or sha256sum in your O/S image to get valdation..."
return 0
fi
}
# do_download URL FILENAME
do_download() {
echo "downloading $1"
echo " to file $2"
url=`echo $1`
if test "x$platform" = "xsolaris2"; then
if test "x$platform_version" = "x5.9" -o "x$platform_version" = "x5.10"; then
# solaris 9 lacks openssl, solaris 10 lacks recent enough credentials - your base O/S is completely insecure, please upgrade
url=`echo $url | sed -e 's/https/http/'`
fi
fi
# we try all of these until we get success.
# perl, in particular may be present but LWP::Simple may not be installed
if exists wget; then
do_wget $url $2 && return 0
fi
if exists curl; then
do_curl $url $2 && return 0
fi
if exists fetch; then
do_fetch $url $2 && return 0
fi
if exists perl; then
do_perl $url $2 && return 0
fi
if exists python; then
do_python $url $2 && return 0
fi
unable_to_retrieve_package
}
# install_file TYPE FILENAME
# TYPE is "rpm", "deb", "solaris", "sh", etc.
# Dispatches to the platform's package tool; reports a bug and exits 1 when
# the type is unknown or the chosen installer exits non-zero.
install_file() {
  echo "Installing $project $version"
  case "$1" in
    "rpm")
      # Cisco Nexus / IOS-XR need yum for their dependency handling.
      if test "x$platform" = "xnexus" || test "x$platform" = "xios_xr"; then
        echo "installing with yum..."
        yum install -yv "$2"
      else
        echo "installing with rpm..."
        rpm -Uvh --oldpackage --replacepkgs "$2"
      fi
      ;;
    "deb")
      echo "installing with dpkg..."
      dpkg -i "$2"
      ;;
    "bff")
      echo "installing with installp..."
      installp -aXYgd "$2" all
      ;;
    "solaris")
      echo "installing with pkgadd..."
      # pkgadd is interactive by default; this admin file disables all checks.
      # $tmp_dir/$project are now quoted so spaces in TMPDIR can't split args.
      echo "conflict=nocheck" > "$tmp_dir/nocheck"
      echo "action=nocheck" >> "$tmp_dir/nocheck"
      echo "mail=" >> "$tmp_dir/nocheck"
      # Best-effort removal of any previous package before installing.
      pkgrm -a "$tmp_dir/nocheck" -n "$project" >/dev/null 2>&1 || true
      pkgadd -G -n -d "$2" -a "$tmp_dir/nocheck" "$project"
      ;;
    "pkg")
      echo "installing with installer..."
      cd / && /usr/sbin/installer -pkg "$2" -target /
      ;;
    "dmg")
      echo "installing dmg file..."
      # Detach any stale mount from a previous failed run, then mount fresh.
      hdiutil detach "/Volumes/cinc_project" >/dev/null 2>&1 || true
      hdiutil attach "$2" -mountpoint "/Volumes/cinc_project"
      cd / && /usr/sbin/installer -pkg `find "/Volumes/cinc_project" -name \*.pkg` -target /
      hdiutil detach "/Volumes/cinc_project"
      ;;
    "sh" )
      echo "installing with sh..."
      sh "$2"
      ;;
    "p5p" )
      echo "installing p5p package..."
      pkg install -g "$2" $project
      ;;
    *)
      echo "Unknown filetype: $1"
      report_bug
      exit 1
      ;;
  esac
  # $? is the status of the last command of the matched branch above.
  if test $? -ne 0; then
    echo "Installation failed"
    report_bug
    exit 1
  fi
}
# Base temp directory: honor $TMPDIR when set and non-empty, else /tmp.
# ${TMPDIR:-/tmp} is exactly equivalent to the original empty-string test.
tmp="${TMPDIR:-/tmp}"
# secure-ish temp dir creation without having mktemp available (DDoS-able but not exploitable)
# Quoted so a TMPDIR containing spaces cannot split the mkdir argument.
tmp_dir="$tmp/install.sh.$$"
(umask 077 && mkdir "$tmp_dir") || exit 1
############
# end of helpers.sh
############
# script_cli_parameters.sh
############
# This section reads the CLI parameters for the install script and translates
# them to the local parameters to be used later by the script.
#
# Outputs:
# $version: Requested version to be installed.
# $channel: Channel to install the product from
# $project: Project to be installed
# $cmdline_filename: Name of the package downloaded on local disk.
# $cmdline_dl_dir: Name of the directory downloaded package will be saved to on local disk.
# $install_strategy: Method of package installations. default strategy is to always install upon exec. Set to "once" to skip if project is installed
# $download_url_override: Install package downloaded from a direct URL.
# $checksum: SHA256 for download_url_override file (optional)
############
# Defaults
channel="stable"
project="chef"
# Parse CLI flags (see the usage string below for meanings).
# BUGFIX: "-a" takes the checksum as an argument, so "a" needs a trailing
# ":" in the optstring; without it $OPTARG was never populated for -a.
while getopts pnv:c:f:P:d:s:l:a: opt
do
  case "$opt" in
    v) version="$OPTARG";;
    c) channel="$OPTARG";;
    p) channel="current";; # compat for prerelease option
    n) channel="current";; # compat for nightlies option
    f) cmdline_filename="$OPTARG";;
    P) project="$OPTARG";;
    d) cmdline_dl_dir="$OPTARG";;
    s) install_strategy="$OPTARG";;
    l) download_url_override="$OPTARG";;
    a) checksum="$OPTARG";;
    \?) # unknown flag
      echo >&2 \
      "usage: $0 [-P project] [-c release_channel] [-v version] [-f filename | -d download_dir] [-s install_strategy] [-l download_url_override] [-a checksum]"
      exit 1;;
  esac
done
shift `expr $OPTIND - 1`
# With "-s once", an existing install under /opt/$project short-circuits the run.
if test -d "/opt/$project" && test "x$install_strategy" = "xonce"; then
  echo "$project installation detected"
  echo "install_strategy set to 'once'"
  echo "Nothing to install"
  exit
fi
# platform_detection.sh
############
# This section makes platform detection compatible with omnitruck on the system
# it runs.
#
# Outputs:
# $platform: Name of the platform.
# $platform_version: Version of the platform.
# $machine: System's architecture.
############
#
# Platform and Platform Version detection
#
# NOTE: This logic should match ohai platform and platform_version matching.
# do not invent new platform and platform_version schemas, just make this behave
# like what ohai returns as platform and platform_version for the system.
#
# ALSO NOTE: Do not mangle platform or platform_version here. It is less error
# prone and more future-proof to do that in the server, and then all omnitruck clients
# will 'inherit' the changes (install.sh is not the only client of the omnitruck
# endpoint out there).
#
machine=`uname -m`
os=`uname -s`
# The elif chain below is ordered: more specific release files are checked
# before generic ones. Do not reorder casually.
# Ubuntu/derivatives ship /etc/lsb-release with DISTRIB_ID; Wind River Linux
# also has one, so it is explicitly excluded here.
if test -f "/etc/lsb-release" && grep DISTRIB_ID /etc/lsb-release >/dev/null && ! grep wrlinux /etc/lsb-release >/dev/null; then
platform=`grep DISTRIB_ID /etc/lsb-release | cut -d "=" -f 2 | tr '[A-Z]' '[a-z]'`
platform_version=`grep DISTRIB_RELEASE /etc/lsb-release | cut -d "=" -f 2`
# Cumulus keeps the surrounding quotes and a space in DISTRIB_ID; normalize.
if test "$platform" = "\"cumulus linux\""; then
platform="cumulus_linux"
elif test "$platform" = "\"cumulus networks\""; then
platform="cumulus_networks"
fi
elif test -f "/etc/debian_version"; then
platform="debian"
platform_version=`cat /etc/debian_version`
elif test -f "/etc/Eos-release"; then
# EOS may also contain /etc/redhat-release so this check must come first.
platform=arista_eos
platform_version=`awk '{print $4}' /etc/Eos-release`
machine="i386"
elif test -f "/etc/redhat-release"; then
platform=`sed 's/^\(.\+\) release.*/\1/' /etc/redhat-release | tr '[A-Z]' '[a-z]'`
platform_version=`sed 's/^.\+ release \([.0-9]\+\).*/\1/' /etc/redhat-release`
if test "$platform" = "xenserver"; then
# Current XenServer 6.2 is based on CentOS 5, platform is not reset to "el" server should handle response
platform="xenserver"
else
# FIXME: use "redhat"
platform="el"
fi
elif test -f "/etc/system-release"; then
platform=`sed 's/^\(.\+\) release.\+/\1/' /etc/system-release | tr '[A-Z]' '[a-z]'`
platform_version=`sed 's/^.\+ release \([.0-9]\+\).*/\1/' /etc/system-release | tr '[A-Z]' '[a-z]'`
case $platform in amazon*) # sh compat method of checking for a substring
# Amazon Linux is served "el" artifacts: AL2 maps to el7, older AMIs to el6.
platform="el"
. /etc/os-release
platform_version=$VERSION_ID
if test "$platform_version" = "2"; then
platform_version="7"
else
# VERSION_ID will match YYYY.MM for Amazon Linux AMIs
platform_version="6"
fi
esac
# Apple OS X
elif test -f "/usr/bin/sw_vers"; then
platform="mac_os_x"
# Matching the tab-space with sed is error-prone
platform_version=`sw_vers | awk '/^ProductVersion:/ { print $2 }' | cut -d. -f1,2`
# x86_64 Apple hardware often runs 32-bit kernels (see OHAI-63)
x86_64=`sysctl -n hw.optional.x86_64`
if test $x86_64 -eq 1; then
machine="x86_64"
fi
elif test -f "/etc/release"; then
# Solaris family: /etc/release exists on both SmartOS and Solaris proper.
machine=`/usr/bin/uname -p`
if grep SmartOS /etc/release >/dev/null; then
platform="smartos"
platform_version=`grep ^Image /etc/product | awk '{ print $3 }'`
else
platform="solaris2"
platform_version=`/usr/bin/uname -r`
fi
elif test -f "/etc/SuSE-release"; then
# SLES mentions "Enterprise" in its release file; otherwise openSUSE Leap.
if grep 'Enterprise' /etc/SuSE-release >/dev/null;
then
platform="sles"
platform_version=`awk '/^VERSION/ {V = $3}; /^PATCHLEVEL/ {P = $3}; END {print V "." P}' /etc/SuSE-release`
else
platform="opensuseleap"
platform_version=`awk '/^VERSION =/ { print $3 }' /etc/SuSE-release`
fi
elif test "x$os" = "xFreeBSD"; then
platform="freebsd"
platform_version=`uname -r | sed 's/-.*//'`
elif test "x$os" = "xAIX"; then
platform="aix"
platform_version="`uname -v`.`uname -r`"
machine="powerpc"
elif test -f "/etc/os-release"; then
# Generic fallback: systemd-style os-release. Cisco platforms point at an
# extra release file via CISCO_RELEASE_INFO, sourced on top.
. /etc/os-release
if test "x$CISCO_RELEASE_INFO" != "x"; then
. $CISCO_RELEASE_INFO
fi
platform=$ID
platform_version=$VERSION
fi
# Nothing matched: bail out rather than asking omnitruck for garbage.
if test "x$platform" = "x"; then
echo "Unable to determine platform version!"
report_bug
exit 1
fi
#
# NOTE: platform mangling in the install.sh is DEPRECATED
#
# - install.sh should be true to ohai and should not remap
# platform or platform versions.
#
# - remapping platform and mangling platform version numbers is
# now the complete responsibility of the server-side endpoints
#
# Collapse platform_version to its major component for platforms whose
# packages are published per major release.
major_version=`echo $platform_version | cut -d. -f1`
case $platform in
# FIXME: should remove this case statement completely
"el")
# FIXME: "el" is deprecated, should use "redhat"
platform_version=$major_version
;;
"debian")
if test "x$major_version" = "x5"; then
# This is here for potential back-compat.
# We do not have 5 in versions we publish for anymore but we
# might have it for earlier versions.
platform_version="6"
else
platform_version=$major_version
fi
;;
"freebsd")
platform_version=$major_version
;;
"sles")
platform_version=$major_version
;;
"opensuseleap")
platform_version=$major_version
;;
esac
# normalize the architecture we detected
case $machine in
"x86_64"|"amd64"|"x64")
machine="x86_64"
;;
"i386"|"i86pc"|"x86"|"i686")
machine="i386"
;;
"sparc"|"sun4u"|"sun4v")
machine="sparc"
;;
esac
# A platform with no detectable version cannot be resolved by omnitruck.
if test "x$platform_version" = "x"; then
echo "Unable to determine platform version!"
report_bug
exit 1
fi
if test "x$platform" = "xsolaris2"; then
# hack up the path on Solaris to find wget, pkgadd
PATH=/usr/sfw/bin:/usr/sbin:$PATH
export PATH
fi
# Log the resolved platform/version/arch triple.
echo "$platform $platform_version $machine"
############
# end of platform_detection.sh
############
# All of the download utilities in this script load common proxy env vars.
# If variables are set they will override any existing env vars.
# Otherwise, default proxy env vars will be loaded by the respective
# download utility.
# For each lowercase proxy variable that is set, mirror it into the
# uppercase variant and export both so every download helper sees them.
if test "x$https_proxy" != "x"; then
  echo "setting https_proxy: $https_proxy"
  HTTPS_PROXY=$https_proxy
  export HTTPS_PROXY https_proxy
fi
if test "x$http_proxy" != "x"; then
  echo "setting http_proxy: $http_proxy"
  HTTP_PROXY=$http_proxy
  export HTTP_PROXY http_proxy
fi
if test "x$ftp_proxy" != "x"; then
  echo "setting ftp_proxy: $ftp_proxy"
  FTP_PROXY=$ftp_proxy
  export FTP_PROXY ftp_proxy
fi
if test "x$no_proxy" != "x"; then
  echo "setting no_proxy: $no_proxy"
  NO_PROXY=$no_proxy
  export NO_PROXY no_proxy
fi
# fetch_metadata.sh
############
# This section calls omnitruck to get the information about the build to be
# installed.
#
# Inputs:
# $channel:
# $project:
# $version:
# $platform:
# $platform_version:
# $machine:
# $tmp_dir:
#
# Outputs:
# $download_url:
# $sha256:
############
# When no direct download URL was supplied (-l), query omnitruck for the
# build's URL and SHA-256, then sanity-check the response.
if test "x$download_url_override" = "x"; then
echo "Getting information for $project $channel $version for $platform..."
metadata_filename="$tmp_dir/metadata.txt"
metadata_url="https://omnitruck.cinc.sh/$channel/$project/metadata?v=$version&p=$platform&pv=$platform_version&m=$machine"
do_download "$metadata_url" "$metadata_filename"
cat "$metadata_filename"
echo ""
# check that all the mandatory fields in the downloaded metadata are there
if grep '^url' $metadata_filename > /dev/null && grep '^sha256' $metadata_filename > /dev/null; then
echo "downloaded metadata file looks valid..."
else
echo "downloaded metadata file is corrupted or an uncaught error was encountered in downloading the file..."
# this generally means one of the download methods downloaded a 404 or something like that and then reported a successful exit code,
# and this should be fixed in the function that was doing the download.
report_bug
exit 1
fi
# Pull the two fields out of the whitespace-separated metadata lines.
download_url=`awk '$1 == "url" { print $2 }' "$metadata_filename"`
sha256=`awk '$1 == "sha256" { print $2 }' "$metadata_filename"`
else
download_url=$download_url_override
# Set sha256 to empty string if checksum not set
# (${checksum=""} assigns "" to checksum only when unset, then expands it)
sha256=${checksum=""}
fi
############
# end of fetch_metadata.sh
############
# fetch_package.sh
############
# This section fetches a package from $download_url and verifies its metadata.
#
# Inputs:
# $download_url:
# $tmp_dir:
# Optional Inputs:
# $cmdline_filename: Name of the package downloaded on local disk.
# $cmdline_dl_dir: Name of the directory downloaded package will be saved to on local disk.
#
# Outputs:
# $download_filename: Name of the downloaded file on local disk.
# $filetype: Type of the file downloaded.
############
# Derive the local file name (strip the URL path) and the package type
# (the final extension) from the download URL.
filename=`echo $download_url | sed -e 's/^.*\///'`
filetype=`echo $filename | sed -e 's/^.*\.//'`
# use either $tmp_dir, the provided directory (-d) or the provided filename (-f)
if test "x$cmdline_filename" != "x"; then
download_filename="$cmdline_filename"
elif test "x$cmdline_dl_dir" != "x"; then
download_filename="$cmdline_dl_dir/$filename"
else
download_filename="$tmp_dir/$filename"
fi
# ensure the parent directory where we download the installer always exists
download_dir=`dirname $download_filename`
(umask 077 && mkdir -p $download_dir) || exit 1
# check if we have that file locally available and if so verify the checksum
# Use cases
# 1) metadata - new download
# 2) metadata - cached download when cmdline_dl_dir set
# 3) url override - no checksum new download
# 4) url override - with checksum new download
# 5) url override - with checksum cached download when cmdline_dl_dir set
cached_file_available="false"
verify_checksum="true"
if test -f $download_filename; then
echo "$download_filename exists"
cached_file_available="true"
fi
# With a URL override, decide whether the cached file can be reused and
# whether a checksum comparison is still needed after downloading.
if test "x$download_url_override" != "x"; then
echo "Download URL override specified"
if test "x$cached_file_available" = "xtrue"; then
echo "Verifying local file"
if test "x$sha256" = "x"; then
echo "Checksum not specified, ignoring existing file"
cached_file_available="false" # download new file
verify_checksum="false" # no checksum to compare after download
elif do_checksum "$download_filename" "$sha256"; then
echo "Checksum match, using existing file"
cached_file_available="true" # don't need to download file
verify_checksum="false" # don't need to checksum again
else
echo "Checksum mismatch, ignoring existing file"
cached_file_available="false" # download new file
verify_checksum="true" # checksum new downloaded file
fi
else
echo "$download_filename not found"
cached_file_available="false" # download new file
if test "x$sha256" = "x"; then
verify_checksum="false" # no checksum to compare after download
else
verify_checksum="true" # checksum new downloaded file
fi
fi
fi
# Download unless a verified cached copy was found above.
if test "x$cached_file_available" != "xtrue"; then
do_download "$download_url" "$download_filename"
fi
if test "x$verify_checksum" = "xtrue"; then
do_checksum "$download_filename" "$sha256" || checksum_mismatch
fi
############
# end of fetch_package.sh
############
# install_package.sh
############
# Installs a package and removed the temp directory.
#
# Inputs:
# $download_filename: Name of the file to be installed.
# $filetype: Type of the file to be installed.
# $version: The version requested. Used only for warning user if not set.
############
# Warn loudly when no version pin was given outside CI ($CI != "true"):
# floating installs upgrade without warning, even across major releases.
if test "x$version" = "x" -a "x$CI" != "xtrue"; then
echo
echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
echo
echo "You are installing a package without a version pin. If you are installing"
echo "on production servers via an automated process this is DANGEROUS and you will"
echo "be upgraded without warning on new releases, even to new major releases."
echo "Letting the version float is only appropriate in desktop, test, development or"
echo "CI/CD environments."
echo
echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
echo
fi
install_file $filetype "$download_filename"
# Remove the scratch directory created at startup.
if test "x$tmp_dir" != "x"; then
rm -r "$tmp_dir"
fi
############
# end of install_package.sh
############

View File

@ -1,21 +0,0 @@
# Install the Chef/Cinc client via the vendor install script, then the
# Ruby gems the cookbook jobs need. Booleans use true/false per yamllint
# "truthy"; module args are nested at standard 2-space indentation.
- name: Install client
  script:
    cmd: "{{ openstack_chef_client_type }}-install.sh -v {{ openstack_chef_client_version }}"
  become: true
- name: Create symlink for cinc
  file:
    state: link
    src: /opt/cinc
    dest: /opt/chef
  become: true
  when: openstack_chef_client_type == 'cinc'
- name: Install berkshelf
  shell: /opt/chef/embedded/bin/gem install -N berkshelf
  become: true
- name: Install cookstyle
  shell: /opt/chef/embedded/bin/gem install -N cookstyle
  become: true
- name: Setup SELinux
  shell: "setenforce {{ openstack_selinux_setenforce }}"
  become: true
  when: ansible_distribution == "CentOS"

View File

@ -1,19 +0,0 @@
{
"name": "allinone",
"description": "This will deploy all of the services for Openstack Compute to function on a single box.",
"run_list": [
"role[common]",
"role[ops_database]",
"role[ops_messaging]",
"role[identity]",
"role[image]",
"role[network]",
"role[compute]",
"role[block_storage]",
"role[bare_metal]",
"role[orchestration]",
"role[telemetry]",
"role[dns]",
"role[dashboard]"
]
}

View File

@ -1,12 +0,0 @@
{
"name": "bare_metal",
"description": "Deploy bare metal services",
"run_list": [
"role[identity]",
"role[image]",
"role[network]",
"recipe[openstack-bare-metal::api]",
"recipe[openstack-bare-metal::conductor]",
"recipe[openstack-bare-metal::identity_registration]"
]
}

Some files were not shown because too many files have changed in this diff Show More