Merge "Install openstack.kolla collection"

This commit is contained in:
Zuul 2022-02-21 21:51:10 +00:00 committed by Gerrit Code Review
commit 8ff7b51fef
22 changed files with 109 additions and 845 deletions

View File

@ -6,5 +6,5 @@
serial: '{{ kolla_serial|default("0") }}'
gather_facts: false
roles:
- { role: baremetal,
- { role: openstack.kolla.baremetal,
tags: baremetal }

View File

@ -1,99 +0,0 @@
---
# Whether to enable a package repository for Docker.
enable_docker_repo: true

# Docker APT repository configuration.
docker_apt_url: "https://download.docker.com/linux/{{ ansible_facts.distribution | lower }}"
docker_apt_repo: "deb {{ docker_apt_url }} {{ ansible_facts.distribution_release }} stable"
docker_apt_key_file: "gpg"
docker_apt_key_id: "0EBFCD88"
docker_apt_package: "docker-ce"

# Docker Yum repository configuration.
docker_yum_url: "https://download.docker.com/linux/{{ ansible_facts.distribution | lower }}"
docker_yum_baseurl: "{{ docker_yum_url }}/$releasever/$basearch/stable"
docker_yum_gpgkey: "{{ docker_yum_url }}/gpg"
docker_yum_gpgcheck: true
docker_yum_package: "docker-ce"

# Ceph release train and EPEL major version used to build repository URLs.
ceph_version: "pacific"
epel_version: "8"
ceph_url: "https://download.ceph.com"

# Ceph APT repository configuration.
ceph_apt_url: "{{ ceph_url }}/debian-{{ ceph_version }}/"
ceph_apt_repo: "deb {{ ceph_apt_url }} {{ ansible_facts.distribution_release }} main"
ceph_apt_key_file: "{{ ceph_url }}/keys/release.gpg"
ceph_apt_key_id: "460F3994"
ceph_apt_package: "ceph-common"

# Ceph Yum repository configuration.
ceph_yum_url: "{{ ceph_url }}/rpm-{{ ceph_version }}/"
ceph_yum_baseurl: "{{ ceph_yum_url }}el{{ epel_version }}/$basearch/"
ceph_yum_gpgkey: "{{ ceph_url }}/keys/release.asc"
ceph_yum_gpgcheck: true
ceph_yum_package: "ceph-common"
epel_yum_package: "epel-release"

# Whether to manage entries in /etc/hosts for the cluster hosts.
customize_etc_hosts: true

# Whether to create a dedicated user/group for running Kolla Ansible, and
# whether to grant that user passwordless sudo.
create_kolla_user: true
create_kolla_user_sudoers: "{{ create_kolla_user }}"
kolla_user: "kolla"
kolla_group: "kolla"

# SELinux handling (applied on RedHat family hosts only).
change_selinux: true
selinux_state: "permissive"

# If true, the host firewall service (firewalld or ufw) will be disabled.
disable_firewall: true

# Docker engine options, written to daemon.json and/or a systemd drop-in.
docker_storage_driver: ""
docker_custom_option: ""
docker_custom_config: {}
docker_http_proxy: ""
docker_https_proxy: ""
docker_no_proxy: ""

# Version of python used to execute Ansible modules.
host_python_version: "{{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"

# Packages to install on Debian-family hosts. Conditional Jinja entries that
# render empty are dropped by the join/split in the install tasks.
debian_pkg_install:
  - "{{ docker_apt_package }}"
  - git
  - "python3-setuptools"
  - "python3-pip"
  - "{% if virtualenv is not none %}python3-virtualenv{% endif %}"
  - "{% if enable_multipathd|bool %}sg3-utils-udev{% endif %}"
  - "{% if not docker_disable_default_iptables_rules | bool %}iptables{% endif %}"

# Packages to install on RedHat-family hosts.
redhat_pkg_install:
  - "{{ docker_yum_package }}"
  - git
  - "python3-pip"
  - "{% if virtualenv is not none %}python3-virtualenv{% endif %}"
  - sudo
  - "{% if not docker_disable_default_iptables_rules | bool %}iptables{% endif %}"

# Packages to remove from the hosts.
ubuntu_pkg_removals:
  - lxd
  - lxc
  - libvirt-bin
  - open-iscsi

redhat_pkg_removals:
  - libvirt
  - libvirt-daemon
  - iscsi-initiator-utils

# Path to a virtualenv in which to install python packages. If None, a
# virtualenv will not be used.
virtualenv:

# Whether the virtualenv will inherit packages from the global site-packages
# directory. This is typically required for modules such as yum and apt which
# are not available on PyPI.
virtualenv_site_packages: true

View File

@ -1,16 +0,0 @@
---
# Ordered baremetal setup phases; import_tasks resolves statically at
# playbook parse time.
- import_tasks: pre-install.yml

- import_tasks: install.yml

- import_tasks: post-install.yml

# Zun-specific extras are only included on hosts in the relevant groups.
- include_tasks: configure-containerd-for-zun.yml
  when:
    - containerd_configure_for_zun | bool
    - "'zun-cni-daemon' in group_names"

- include_tasks: configure-ceph-for-zun.yml
  when:
    - zun_configure_for_cinder_ceph | bool
    - "'zun-compute' in group_names"

View File

@ -1 +0,0 @@
---

View File

@ -1,55 +0,0 @@
---
# Install the ceph-common package from the upstream Ceph repositories,
# selecting the APT or Yum flow based on OS family.
- name: Install ceph-common
  block:
    - block:
        - name: Install ceph apt gpg key
          apt_key:
            url: "{{ ceph_apt_key_file }}"
            id: "{{ ceph_apt_key_id }}"
            state: present
          become: true

        - name: Enable ceph apt repository
          apt_repository:
            repo: "{{ ceph_apt_repo }}"
            filename: ceph
          become: true

        - name: Install apt packages
          package:
            name: "{{ ceph_apt_package }}"
            state: present
          become: true
      when: ansible_facts.os_family == 'Debian'

    - block:
        - name: Enable ceph yum repository
          yum_repository:
            name: ceph
            description: Ceph main Repository
            baseurl: "{{ ceph_yum_baseurl }}"
            gpgcheck: "{{ ceph_yum_gpgcheck | bool }}"
            gpgkey: "{{ ceph_yum_gpgkey }}"
          become: true

        # EPEL is enabled below via enablerepo when installing ceph-common.
        - name: Enable epel yum repository
          package:
            name: "{{ epel_yum_package }}"
            state: present
          become: true

        - name: Install ceph rpm gpg key
          rpm_key:
            state: present
            key: "{{ ceph_yum_gpgkey }}"
          become: true
          when:
            - ceph_yum_gpgcheck | bool

        - name: Install RPM packages
          package:
            name: "{{ ceph_yum_package }}"
            state: present
            enablerepo: epel
          become: true
      when: ansible_facts.os_family == 'RedHat'

View File

@ -1,50 +0,0 @@
---
# Deploy the CNI configuration and the zun-cni shim script, then point
# containerd at the generated config, restarting it when anything changed.
- name: Ensuring CNI config directory exist
  file:
    path: "{{ cni_config_dir }}"
    state: "directory"
    mode: "0770"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
  become: true

- name: Copying CNI config file
  template:
    src: "10-zun-cni.conf.j2"
    dest: "{{ cni_config_dir }}/10-zun-cni.conf"
    mode: "0660"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
  become: true
  register: cni_configured

- name: Ensuring CNI bin directory exist
  file:
    path: "{{ cni_bin_dir }}"
    state: "directory"
    mode: "0770"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
  become: true

- name: Copy zun-cni script
  template:
    src: "zun-cni.j2"
    dest: "{{ cni_bin_dir }}/zun-cni"
    mode: "0775"
  become: true

- name: Copying over containerd config
  template:
    src: "containerd_config.toml.j2"
    dest: "/etc/containerd/config.toml"
    mode: "0660"
  become: true
  register: containerd_configured

# Restart only when either template task reported a change.
- name: Restart containerd
  service:
    name: containerd
    state: restarted
  become: true
  when: cni_configured is changed or containerd_configured is changed

View File

@ -1,162 +0,0 @@
---
- name: Update apt cache
  apt:
    update_cache: true
  become: true
  when: ansible_facts.os_family == 'Debian'

# TODO(inc0): Gates don't seem to have ufw executable, check for it instead of ignore errors
- block:
    - name: Set firewall default policy  # noqa ignore-errors
      become: true
      ufw:
        state: disabled
        policy: allow
      when: ansible_facts.os_family == 'Debian'
      ignore_errors: true

    - name: Check if firewalld is installed
      command: rpm -q firewalld
      register: firewalld_check
      changed_when: false
      # rpm -q returns 1 when the package is absent; anything else is an error.
      failed_when: firewalld_check.rc > 1
      args:
        warn: false
      when: ansible_facts.os_family == 'RedHat'

    - name: Disable firewalld
      become: true
      service:
        name: "{{ item }}"
        enabled: false
        state: stopped
      with_items:
        - firewalld
      when:
        - ansible_facts.os_family == 'RedHat'
        - firewalld_check.rc == 0
  when: disable_firewall | bool

# Upgrading docker engine may cause containers to stop. Take a snapshot of the
# running containers prior to a potential upgrade of Docker.
- name: Check which containers are running
  command: docker ps -f 'status=running' -q
  become: true
  # If Docker is not installed this command may exit non-zero.
  failed_when: false
  changed_when: false
  register: running_containers

# APT starts Docker engine right after installation, which creates
# iptables rules before we disable iptables in Docker config
- name: Check if docker systemd unit exists
  stat:
    path: /etc/systemd/system/docker.service
  register: docker_unit_file

- name: Mask the docker systemd unit on Debian/Ubuntu
  file:
    src: /dev/null
    dest: /etc/systemd/system/docker.service
    owner: root
    group: root
    state: link
  become: true
  when:
    - ansible_facts.os_family == 'Debian'
    - not docker_unit_file.stat.exists

- name: Install apt packages
  package:
    name: "{{ (debian_pkg_install | join(' ')).split() }}"
    state: present
  become: true
  when: ansible_facts.os_family == 'Debian'
  register: apt_install_result

- name: Install deltarpm packages
  package:
    name: drpm
    state: present
    update_cache: true
  become: true
  when: ansible_facts.os_family == 'RedHat'

- name: Install RPM packages
  package:
    name: "{{ (redhat_pkg_install | join(' ')).split() }}"
    state: present
    update_cache: true
  become: true
  when: ansible_facts.os_family == 'RedHat'
  register: rpm_install_result

# If any packages were updated, and any containers were running, wait for the
# daemon to come up and start all previously running containers.
- block:
    # At some point (at least on CentOS 7) Docker CE stopped starting
    # automatically after an upgrade from legacy docker. Start it manually.
    - name: Start docker
      systemd:
        name: docker
        state: started
        enabled: true
        masked: false
      become: true

    - name: Wait for Docker to start
      command: docker info
      become: true
      changed_when: false
      register: result
      until: result is success
      retries: 6
      delay: 10

    - name: Ensure containers are running after Docker upgrade
      command: "docker start {{ running_containers.stdout }}"
      become: true
  when:
    - install_result is changed
    - running_containers.rc == 0
    - running_containers.stdout != ''
  vars:
    install_result: "{{ rpm_install_result if ansible_facts.os_family == 'RedHat' else apt_install_result }}"

- name: Install latest pip in the virtualenv
  pip:
    # NOTE(hrw) pip 19.3 is first version complaining about being run with Python 2
    name: pip>19.3
    virtualenv: "{{ virtualenv }}"
    virtualenv_site_packages: "{{ virtualenv_site_packages }}"
    virtualenv_python: "python{{ host_python_version }}"
  become: true
  when: virtualenv is not none

- name: Install docker SDK for python
  pip:
    # NOTE(hrw) docker 2.4.2 is in kolla-ansible requirements
    # NOTE(mnasiadka): docker 5.0.0 lacks six in deps but requires it
    name: docker>=2.4.2,<5.0.0
    executable: "{{ virtualenv is none | ternary('pip3', omit) }}"
    virtualenv: "{{ virtualenv is none | ternary(omit, virtualenv) }}"
    virtualenv_site_packages: "{{ virtualenv is none | ternary(omit, virtualenv_site_packages) }}"
    virtualenv_python: "{{ virtualenv is none | ternary(omit, 'python' ~ host_python_version) }}"
  become: true

- name: Remove packages
  package:
    name: "{{ (ubuntu_pkg_removals | join(' ')).split() }}"
    state: absent
  become: true
  when: ansible_facts.os_family == 'Debian'

- name: Remove packages
  package:
    name: "{{ (redhat_pkg_removals | join(' ')).split() }}"
    state: absent
  become: true
  when: ansible_facts.os_family == 'RedHat'

View File

@ -1,2 +0,0 @@
---
# Dispatch to the task file matching the requested kolla action
# (e.g. deploy.yml, upgrade.yml).
- include_tasks: "{{ kolla_action }}.yml"

View File

@ -1,257 +0,0 @@
---
- name: Create kolla user
  user:
    name: "{{ kolla_user }}"
    state: present
    group: "{{ kolla_group }}"
    groups: "sudo"
    append: true
  become: true
  when: create_kolla_user | bool

- name: Add public key to kolla user authorized keys
  authorized_key:
    user: "{{ kolla_user }}"
    key: "{{ kolla_ssh_key.public_key }}"
  become: true
  when: create_kolla_user | bool

- name: Grant kolla user passwordless sudo
  lineinfile:
    dest: /etc/sudoers.d/kolla-ansible-users
    state: present
    create: true
    mode: '0640'
    regexp: '^{{ kolla_user }}'
    line: '{{ kolla_user }} ALL=(ALL) NOPASSWD: ALL'
  become: true
  when: create_kolla_user_sudoers | bool

- name: Ensure virtualenv has correct ownership
  file:
    path: "{{ virtualenv }}"
    recurse: true
    state: directory
    owner: "{{ kolla_user }}"
    group: "{{ kolla_group }}"
  become: true
  when: virtualenv is not none

- name: Ensure node_config_directory directory exists for user kolla
  file:
    path: "{{ node_config_directory }}"
    state: directory
    owner: "{{ kolla_user }}"
    group: "{{ kolla_group }}"
    mode: "0755"
  become: true
  when: create_kolla_user | bool

- name: Ensure node_config_directory directory exists
  file:
    path: "{{ node_config_directory }}"
    state: directory
    mode: "0755"
  become: true
  when: not create_kolla_user | bool

- name: Ensure docker config directory exists
  file:
    path: /etc/docker
    state: directory
  become: true

# The following set_fact tasks progressively build up docker_config, which
# is finally rendered to /etc/docker/daemon.json below.
- name: Merge Zun docker config
  set_fact:
    docker_config: "{{ docker_config | combine(docker_zun_config) }}"
  when:
    - docker_configure_for_zun | bool
    - "'zun-compute' in group_names"

- name: Warn about deprecations
  debug:
    msg: >
      docker_custom_option is deprecated in favor of docker_custom_config
  when: docker_custom_option | length > 0

- name: Setup docker insecure registries
  vars:
    registries: ["{{ docker_registry }}"]
  set_fact:
    docker_config: "{{ docker_config | combine({'insecure-registries': registries}) }}"
  when: docker_registry_insecure | bool

- name: Setup docker storage driver
  set_fact:
    docker_config: "{{ docker_config | combine({'storage-driver': docker_storage_driver}) }}"
  when: docker_storage_driver | length > 0

- name: Setup docker runtime directory
  set_fact:
    docker_config: "{{ docker_config | combine({'data-root': docker_runtime_directory}) }}"
  when: docker_runtime_directory | length > 0

- name: Warn about docker default iptables
  debug:
    msg: >-
      Docker default iptables rules will be disabled by default from the Wallaby 12.0.0
      release. If you have any non-Kolla containers that need this functionality, you should
      plan a migration for this change, or set docker_disable_default_iptables_rules to false.
  when: not docker_disable_default_iptables_rules | bool

- name: Disable docker default iptables rules
  set_fact:
    docker_config: "{{ docker_config | combine({'iptables': false}) }}"
  when: docker_disable_default_iptables_rules | bool

- name: Warn about docker default networking
  debug:
    msg: >-
      Docker default network on docker0 will be disabled by default from the
      Wallaby 12.0.0 release. If you have any non-Kolla containers that need
      this functionality, you should plan a migration for this change, or set
      docker_disable_default_network to false.
  when: not docker_disable_default_network | bool

- name: Disable docker default network on docker0
  set_fact:
    docker_config: "{{ docker_config | combine({'bridge': 'none'}) }}"
  when: docker_disable_default_network | bool

- name: Warn about docker ip_forward
  debug:
    msg: >-
      Docker ip_forward will be disabled by default from the
      Wallaby 12.0.0 release. If you have any non-Kolla containers that need
      this functionality, you should plan a migration for this change, or set
      docker_disable_ip_forward to false.
  when: not docker_disable_ip_forward | bool

- name: Disable docker ip_forward
  set_fact:
    docker_config: "{{ docker_config | combine({'ip-forward': false}) }}"
  when: docker_disable_ip_forward | bool

# Custom config is merged last so operators can override any of the above.
- name: Merge custom docker config
  set_fact:
    docker_config: "{{ docker_config | combine(docker_custom_config) }}"

- name: Write docker config
  become: true
  copy:
    content: "{{ docker_config | to_nice_json }}"
    dest: /etc/docker/daemon.json
    mode: "0644"
  register: docker_configured

# Drop the systemd drop-in when none of the options that require it are set.
- name: Remove old docker options file
  become: true
  file:
    path: /etc/systemd/system/docker.service.d/kolla.conf
    state: absent
  when:
    - not docker_custom_option
    - not docker_configure_for_zun | bool or 'zun-compute' not in group_names
    - not docker_http_proxy
    - not docker_https_proxy
    - not docker_no_proxy

- name: Ensure docker service directory exists
  become: true
  file:
    path: /etc/systemd/system/docker.service.d
    state: directory
    recurse: true
  when: >
    docker_custom_option | length > 0 or
    (docker_configure_for_zun | bool and 'zun-compute' in group_names) or
    docker_http_proxy | length > 0 or
    docker_https_proxy | length > 0 or
    docker_no_proxy | length > 0

- name: Configure docker service
  become: true
  template:
    src: docker_systemd_service.j2
    dest: /etc/systemd/system/docker.service.d/kolla.conf
  when: >
    docker_custom_option | length > 0 or
    (docker_configure_for_zun | bool and 'zun-compute' in group_names) or
    docker_http_proxy | length > 0 or
    docker_https_proxy | length > 0 or
    docker_no_proxy | length > 0

- name: Reload docker service file
  become: true
  systemd:
    name: docker
    daemon_reload: true
  register: docker_reloaded

- name: Get stat of libvirtd apparmor profile
  stat:
    path: /etc/apparmor.d/usr.sbin.libvirtd
  register: apparmor_libvirtd_profile
  when: ansible_facts.distribution == "Ubuntu"

- name: Get stat of libvirtd apparmor disable profile
  stat:
    path: /etc/apparmor.d/disable/usr.sbin.libvirtd
  register: apparmor_libvirtd_disable_profile
  when: ansible_facts.distribution == "Ubuntu"

# NOTE(review): presumably required for the containerised libvirt on
# Ubuntu hosts — confirm before changing this behaviour.
- name: Remove apparmor profile for libvirt
  shell: |
    apparmor_parser -v -R /etc/apparmor.d/usr.sbin.libvirtd && \
    ln -vsf /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable
  args:
    executable: /bin/bash
  become: true
  when:
    - ansible_facts.distribution == "Ubuntu"
    - apparmor_libvirtd_profile.stat.exists
    - not apparmor_libvirtd_disable_profile.stat.exists

- name: Create docker group
  group:
    name: docker
  become: true

- name: Add kolla user to docker group
  user:
    name: "{{ kolla_user }}"
    append: true
    groups: docker
  become: true
  when: create_kolla_user | bool

- name: Start docker
  systemd:
    name: docker
    state: started
    masked: false
  become: true

- name: Restart docker
  systemd:
    name: docker
    state: restarted
    masked: false
  become: true
  when: docker_configured is changed or docker_reloaded is changed

- name: Enable docker
  systemd:
    name: docker
    enabled: true
    masked: false
  become: true

- name: Change state of selinux
  selinux:
    policy: targeted
    state: "{{ selinux_state }}"
  become: true
  when:
    - change_selinux | bool
    - ansible_facts.os_family == "RedHat"

View File

@ -1,159 +0,0 @@
---
- name: Ensure localhost in /etc/hosts
  lineinfile:
    dest: /etc/hosts
    regexp: "^127.0.0.1.*"
    line: "127.0.0.1 localhost"
    state: present
  become: true
  when: customize_etc_hosts | bool

# NOTE(mgoddard): Ubuntu may include a line in /etc/hosts that makes the local
# hostname and fqdn point to 127.0.1.1. This can break
# RabbitMQ, which expects the hostname to resolve to the API network address.
# Remove the troublesome entry.
# see https://bugs.launchpad.net/kolla-ansible/+bug/1837699
# and https://bugs.launchpad.net/kolla-ansible/+bug/1862739
- name: Ensure hostname does not point to 127.0.1.1 in /etc/hosts
  lineinfile:
    dest: /etc/hosts
    regexp: "^127.0.1.1\\b.*\\s{{ ansible_facts.hostname }}\\b"
    state: absent
  become: true
  when: customize_etc_hosts | bool

- name: Generate /etc/hosts for all of the nodes
  blockinfile:
    dest: /etc/hosts
    marker: "# {mark} ANSIBLE GENERATED HOSTS"
    block: |
      {% for host in groups['baremetal'] %}
      {% set api_interface = hostvars[host]['api_interface'] %}
      {% if host not in groups['bifrost'] or api_interface in hostvars[host].ansible_facts %}
      {% set hostnames = [hostvars[host].ansible_facts.nodename, hostvars[host].ansible_facts.hostname] %}
      {{ 'api' | kolla_address(host) }} {{ hostnames | unique | join(' ') }}
      {% endif %}
      {% endfor %}
  become: true
  when:
    - customize_etc_hosts | bool
    # Skip hosts in the bifrost group that do not have a valid api_interface.
    - inventory_hostname not in groups['bifrost'] or
      hostvars[inventory_hostname]['api_interface'] | replace('-', '_') in hostvars[inventory_hostname].ansible_facts

- name: Ensure unprivileged users can use ping
  become: true
  sysctl:
    name: net.ipv4.ping_group_range
    value: '0 2147483647'
    state: present
    sysctl_file: "{{ kolla_sysctl_conf_path }}"
  when: ansible_facts.os_family == 'RedHat'

# NOTE(osmanlicilegi): The distribution might come with cloud-init installed,
# and manage_etc_hosts configuration enabled. If so, it will override the file
# /etc/hosts from cloud-init templates at every boot, which will break
# RabbitMQ. To prevent this, first check whether cloud-init has been
# installed, and then set manage_etc_hosts to false.
- name: Check whether cloud-init has been installed, and ensure manage_etc_hosts is disabled
  block:
    - name: Ensure /etc/cloud/cloud.cfg exists
      stat:
        path: /etc/cloud/cloud.cfg
      register: cloud_init

    - name: Disable cloud-init manage_etc_hosts
      copy:
        content: "manage_etc_hosts: false"
        dest: /etc/cloud/cloud.cfg.d/99-kolla.cfg
        mode: "0660"
      when: cloud_init.stat.exists
  become: true
  when: customize_etc_hosts | bool

- name: Ensure sudo group is present
  group:
    name: sudo
    state: present
  become: true

- name: Ensure kolla group is present
  group:
    name: "{{ kolla_group }}"
    state: present
  become: true
  when: create_kolla_user | bool

- block:
    - block:
        - name: Install apt packages
          apt:
            update_cache: true
          become: true

        - name: Install CA certificates and gnupg packages
          package:
            name: "{{ item }}"
            state: latest
          become: true
          with_items:
            - ca-certificates
            - gnupg

        - name: Ensure apt sources list directory exists
          file:
            path: /etc/apt/sources.list.d
            state: directory
            recurse: true
          become: true

        - name: Install docker apt gpg key
          apt_key:
            url: "{{ docker_apt_url }}/{{ docker_apt_key_file }}"
            id: "{{ docker_apt_key_id }}"
            state: present
          become: true

        - name: Enable docker apt repository
          apt_repository:
            repo: "{{ docker_apt_repo }}"
            filename: docker
          become: true
      when: ansible_facts.os_family == 'Debian'

    - block:
        - name: Ensure yum repos directory exists
          file:
            path: /etc/yum.repos.d/
            state: directory
            recurse: true
          become: true

        - name: Enable docker yum repository
          yum_repository:
            name: docker
            description: Docker main Repository
            baseurl: "{{ docker_yum_baseurl }}"
            gpgcheck: "{{ docker_yum_gpgcheck | bool }}"
            gpgkey: "{{ docker_yum_gpgkey }}"
          become: true

        # NOTE(yoctozepto): above cannot set this but we require it
        # to install containerd.io due to runc being a modular package
        # in CentOS 8
        # see: https://bugzilla.redhat.com/show_bug.cgi?id=1734081
        - name: Ensure module_hotfixes enabled for docker
          lineinfile:
            dest: /etc/yum.repos.d/docker.repo
            regexp: "^module_hotfixes"
            line: "module_hotfixes = True"
            state: present
          become: true

        - name: Install docker rpm gpg key
          rpm_key:
            state: present
            key: "{{ docker_yum_gpgkey }}"
          become: true
          when: docker_yum_gpgcheck | bool
      when: ansible_facts.os_family == 'RedHat'
  when: enable_docker_repo | bool

View File

@ -1,5 +0,0 @@
{
"cniVersion": "0.3.1",
"name": "zun",
"type": "zun-cni"
}

View File

@ -1,2 +0,0 @@
# Minimal containerd configuration: allow the configured group to access
# containerd's gRPC socket (used by the Zun CNI integration).
[grpc]
gid = {{ containerd_grpc_gid }}

View File

@ -1,13 +0,0 @@
[Service]
# Proxy environment for the Docker daemon; each variable is emitted only
# when the corresponding kolla variable is non-empty.
{% if docker_http_proxy | length > 0 %}
Environment="HTTP_PROXY={{ docker_http_proxy }}"
{% endif %}
{% if docker_https_proxy | length > 0 %}
Environment="HTTPS_PROXY={{ docker_https_proxy }}"
{% endif %}
{% if docker_no_proxy | length > 0 %}
Environment="NO_PROXY={{ docker_no_proxy }}"
{% endif %}
# Empty ExecStart= clears the packaged command line before overriding it.
ExecStart=
# ExecStart commandline copied from 'docker-ce' package. Same on CentOS/Debian/Ubuntu systems.
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock{% if docker_custom_option %} {{ docker_custom_option }}{% endif %}{% if docker_configure_for_zun|bool and 'zun-compute' in group_names %} {{ docker_zun_options }}{% endif %}

View File

@ -1,12 +0,0 @@
#!/bin/bash
# Forward a CNI plugin invocation to the zun_cni_daemon container,
# passing through all CNI_* environment variables and stdin.
env_list=""
for line in $(env | grep "CNI_")
do
# Split NAME=VALUE on the first '=' only; values may themselves contain '='.
key=$(echo "$line" | cut -d "=" -f 1)
value=$(echo "$line" | cut -d "=" -f 2-)
env_list="$env_list --env ${key}=\"${value}\""
done
# NOTE(review): the unquoted $(env) loop word-splits on whitespace, so a
# CNI_* value containing spaces would be truncated — assumed not to occur
# for CNI variables; confirm before reusing this pattern elsewhere.
cmd="docker exec -i $env_list zun_cni_daemon zun-cni <&0"
eval "$cmd"

View File

@ -1,8 +0,0 @@
---
# Base Docker daemon configuration; post-install tasks merge further
# options into this mapping before writing /etc/docker/daemon.json.
docker_config:
  log-opts:
    max-file: "{{ docker_log_max_file }}"
    max-size: "{{ docker_log_max_size }}"

# Locations for Zun CNI configuration and binaries.
cni_config_dir: /etc/cni/net.d
cni_bin_dir: /opt/cni/bin

View File

@ -112,6 +112,13 @@ First, upgrade the ``kolla-ansible`` package:
If you are running from Git repository, then just checkout the desired
branch and run ``pip3 install --upgrade`` with the repository directory.
If upgrading to a Yoga release or later, install or upgrade Ansible Galaxy
dependencies:
.. code-block:: console
kolla-ansible install-deps
The inventory file for the deployment should be updated, as the newer sample
inventory files may have updated layout or other relevant changes.
The ``diff`` tool (or similar) is your friend in this task.

View File

@ -245,6 +245,15 @@ Install Kolla for development
cp kolla-ansible/ansible/inventory/* .
Install Ansible Galaxy requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Install Ansible Galaxy dependencies (Yoga release onwards):
.. code-block:: console
kolla-ansible install-deps
Configure Ansible
~~~~~~~~~~~~~~~~~

5
requirements.yml Normal file
View File

@ -0,0 +1,5 @@
---
# Ansible Galaxy collection dependencies, installed via
# 'kolla-ansible install-deps'.
collections:
  - name: https://opendev.org/openstack/ansible-collection-kolla
    type: git
    version: master

View File

@ -38,6 +38,7 @@ data_files =
share/kolla-ansible = tools/init-runonce
share/kolla-ansible = tools/init-vpn
share/kolla-ansible = setup.cfg
share/kolla-ansible = requirements.yml
scripts =
tools/kolla-ansible

View File

@ -14,6 +14,7 @@
set_fact:
kolla_inventory_path: "/etc/kolla/inventory"
logs_dir: "/tmp/logs"
ansible_collection_kolla_src_dir: "{{ ansible_env.PWD }}/src/{{ zuul.project.canonical_hostname }}/openstack/ansible-collection-kolla"
kolla_ansible_src_dir: "{{ ansible_env.PWD }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible"
kolla_ansible_local_src_dir: "{{ zuul.executor.work_root }}/src/{{ zuul.project.canonical_hostname }}/openstack/kolla-ansible"
infra_dockerhub_mirror: "http://{{ zuul_site_mirror_fqdn }}:8082/"
@ -71,9 +72,9 @@
set_fact:
need_build_image: true
when:
# NOTE(yoctozepto): if there is any tested change that does not belong to kolla-ansible,
# then kolla images should be built
- item.project.short_name != "kolla-ansible"
# NOTE(yoctozepto): build container images if there is any tested
# change that impacts them.
- item.project.short_name not in ["ansible-collection-kolla", "kayobe", "kolla-ansible"]
with_items: "{{ zuul['items'] }}"
# NOTE(yoctozepto): required to template template_overrides.j2 for Zuul
@ -85,6 +86,17 @@
- block:
# NOTE(mgoddard): This only affects the remote copy of the repo, not the
# one on the executor.
- name: checkout the previous ansible-collection-kolla branch
shell:
cmd: |
git checkout stable/{{ previous_release | lower }}
echo "ansible-collection-kolla checked out to:"
git log --pretty=oneline -1
chdir: "{{ ansible_collection_kolla_src_dir }}"
# TODO(mgoddard): Do this unconditionally when previous_release is
# yoga.
when: previous_release != 'xena'
- name: checkout the previous kolla-ansible branch
shell:
cmd: |
@ -187,6 +199,26 @@
dest: ironic-agent.kernel
when: scenario == "ironic"
# TODO(mgoddard): Do this unconditionally when previous_release is yoga.
- block:
- name: slurp requirements.yml
slurp:
src: "{{ kolla_ansible_src_dir }}/requirements.yml"
register: requirements_yml
- name: write requirements.yml
copy:
content: "{{ new_requirements | to_nice_yaml }}"
dest: "{{ kolla_ansible_src_dir }}/requirements.yml"
vars:
old_requirements: "{{ requirements_yml.content | b64decode | from_yaml }}"
new_requirement:
name: "{{ ansible_collection_kolla_src_dir }}"
type: dir
new_requirements:
collections: "{{ (old_requirements.collections | rejectattr('name', 'search', 'ansible-collection-kolla') | list) + [new_requirement] }}"
when: not is_upgrade or previous_release != 'xena'
- name: ensure /etc/ansible exists
file:
path: /etc/ansible
@ -214,6 +246,12 @@
ansible{{ ansible_version_constraint }}
ara<1.0.0
# TODO(mgoddard): do this unconditionally when previous release is Yoga.
- name: install Ansible collections
command: >-
kolla-ansible install-deps
when: not is_upgrade or previous_release != 'xena'
- name: get ARA callback plugin path
command: "python3 -m ara.setup.callback_plugins"
changed_when: false
@ -506,6 +544,14 @@
- block:
# NOTE(mgoddard): This only affects the remote copy of the repo, not the
# one on the executor.
- name: checkout the current ansible-collection-kolla branch
shell:
cmd: |
git checkout {{ zuul.branch }}
echo "ansible-collection-kolla checked out to:"
git log --pretty=oneline -1
chdir: "{{ ansible_collection_kolla_src_dir }}"
- name: checkout the current kolla-ansible branch
shell:
cmd: |
@ -540,12 +586,33 @@
dest: /etc/kolla/config/nova/nova-compute.conf
when: item.when | default(true)
- name: slurp requirements.yml
slurp:
src: "{{ kolla_ansible_src_dir }}/requirements.yml"
register: requirements_yml
- name: write requirements.yml
copy:
content: "{{ new_requirements | to_nice_yaml }}"
dest: "{{ kolla_ansible_src_dir }}/requirements.yml"
vars:
old_requirements: "{{ requirements_yml.content | b64decode | from_yaml }}"
new_requirement:
name: "{{ ansible_collection_kolla_src_dir }}"
type: dir
new_requirements:
collections: "{{ (old_requirements.collections | rejectattr('name', 'search', 'ansible-collection-kolla') | list) + [new_requirement] }}"
- name: upgrade kolla-ansible
command: >-
python3 -m pip install --user
-c {{ upper_constraints_file }}
{{ kolla_ansible_src_dir }}
- name: install Ansible collections
command: >-
kolla-ansible install-deps
# Update passwords.yml to include any new passwords added in this
# release.
- name: move passwords.yml to passwords.yml.old

View File

@ -124,6 +124,15 @@ function find_base_dir {
fi
}
# Install the Ansible Galaxy collections listed in requirements.yml.
# Exits non-zero if installation fails, since later playbooks depend on
# the openstack.kolla collection being present.
function install_deps {
    echo "Installing Ansible Galaxy dependencies"
    # --force re-installs to pick up updates to already-installed collections.
    if ! ansible-galaxy collection install -r "${BASEDIR}/requirements.yml" --force; then
        echo "ERROR: Failed to install Ansible Galaxy dependencies" >&2
        exit 1
    fi
}
function process_cmd {
echo "$ACTION : $CMD"
$CMD
@ -162,6 +171,7 @@ Environment variables:
EXTRA_OPTS Additional arguments to pass to ansible-playbook
Commands:
install-deps Install Ansible Galaxy dependencies
prechecks Do pre-deployment checks for hosts
check Do post-deployment smoke tests
mariadb_recovery Recover a completely stopped mariadb cluster
@ -211,6 +221,7 @@ cat <<EOF
--diff -D
--verbose -v
--version
install-deps
prechecks
check
mariadb_recovery
@ -405,6 +416,10 @@ done
case "$1" in
(install-deps)
install_deps
exit 0
;;
(prechecks)
ACTION="Pre-deployment checking"
EXTRA_OPTS="$EXTRA_OPTS -e kolla_action=precheck"

View File

@ -8,6 +8,7 @@
timeout: 7200
post-timeout: 1800
required-projects:
- openstack/ansible-collection-kolla
- openstack/kolla
- openstack/kolla-ansible
- openstack/requirements