Remove tripleo-common/playbooks

These playbooks have been imported into tripleo-ansible where they will
be maintained from now on. This change removes the old playbooks directory,
cleans up its entry in setup.cfg and updates the relevant workbooks to
ensure they're pointing at the proper location.

Depends-On: Ib9be1d4910872fa38bc52fa2327bfdff37c03d06
Change-Id: I81861520972fc7ffcc8620388eb1f8288405f75a
Signed-off-by: Kevin Carter <kecarter@redhat.com>
This commit is contained in:
Kevin Carter 2019-06-13 17:33:56 -05:00 committed by Kevin Carter (cloudnull)
parent 05460a6657
commit 50e464c6f4
28 changed files with 2 additions and 1014 deletions

View File

@ -1,95 +0,0 @@
---
# Octavia post-deployment configuration:
#   play 1 (undercloud):       upload the amphora image and SSH keypair
#   play 2 (first controller): management network, security groups, certs
#   play 3 (all controllers):  per-node config, certificates, mgmt port
#   play 4 (all controllers):  health-manager IP list and container restart
- hosts: Undercloud[0]
  remote_user: stack
  gather_facts: true
  vars:
    amp_ssh_key_name: "{{ amp_ssh_key_name }}"
    amp_ssh_key_path: "{{ amp_ssh_key_path }}"
    amp_ssh_key_data: "{{ amp_ssh_key_data }}"
    amp_to_raw: "{{ amp_to_raw | bool }}"
    auth_username: "{{ auth_username }}"
    auth_password: "{{ auth_password }}"
    auth_project_name: "{{ auth_project_name }}"
  # Overcloud credentials exported to every openstack CLI call; reused
  # verbatim by the next two plays via the anchor.
  environment: &overcloud_env
    OS_USERNAME: "{{ os_username }}"
    OS_USER_DOMAIN_NAME: "Default"
    OS_PROJECT_DOMAIN_NAME: "Default"
    NOVA_VERSION: "1.1"
    OS_PROJECT_NAME: "{{ os_project_name }}"
    OS_PASSWORD: "{{ os_password }}"
    COMPUTE_API_VERSION: "1.1"
    OS_CLOUDNAME: "overcloud"
    OS_AUTH_URL: "{{ os_auth_url }}"
    OS_IDENTITY_API_VERSION: "{{ os_identity_api_version }}"
    OS_IMAGE_API_VERSION: "2"
    OS_AUTH_TYPE: "{{ os_auth_type }}"
  roles:
    - octavia-undercloud

- hosts: octavia_nodes[0]
  gather_facts: true
  vars:
    generate_certs: "{{ generate_certs }}"
    octavia_confd_prefix: "/var/lib/config-data/puppet-generated/octavia"
    openssl_temp_dir: "/tmp/octavia-ssl"
    ca_cert_path: "{{ ca_cert_path }}"
    ca_private_key_path: "{{ ca_private_key_path }}"
    ca_passphrase: "{{ ca_passphrase }}"
    client_cert_path: "{{ client_cert_path }}"
    auth_project_name: "{{ auth_project_name }}"
    auth_username: "{{ auth_username }}"
    auth_password: "{{ auth_password }}"
  environment: *overcloud_env
  roles:
    - octavia-overcloud-config

- hosts: octavia_nodes
  gather_facts: true
  vars:
    octavia_confd_prefix: "/var/lib/config-data/puppet-generated/octavia"
    # Values computed on the first controller, fanned out to all nodes.
    lb_mgmt_net_id: "{{ hostvars[groups['octavia_nodes'][0]]['lb_mgmt_net_id'] }}"
    lb_mgmt_secgroup_id: "{{ hostvars[groups['octavia_nodes'][0]]['lb_mgmt_secgroup_id'] }}"
    private_key_content: "{{ hostvars[groups['octavia_nodes'][0]]['private_key_content'] }}"
    ca_cert_content: "{{ hostvars[groups['octavia_nodes'][0]]['ca_cert_content'] }}"
    service_pem_content: "{{ hostvars[groups['octavia_nodes'][0]]['service_pem_content'] }}"
    generate_certs: "{{ generate_certs }}"
    ca_cert_path: "{{ ca_cert_path }}"
    ca_private_key_path: "{{ ca_private_key_path }}"
    ca_passphrase: "{{ ca_passphrase }}"
    client_cert_path: "{{ client_cert_path }}"
    auth_project_name: "{{ auth_project_name }}"
  environment: *overcloud_env
  roles:
    - octavia-controller-config

- hosts: octavia_nodes
  gather_facts: true
  vars:
    octavia_confd_prefix: "/var/lib/config-data/puppet-generated/octavia"
    container_cli: "{{ container_cli }}"
  roles:
    - octavia-controller-post-config

View File

@ -1,19 +0,0 @@
---
# Default variables shared by the octavia roles.
amp_image_name: ""
amp_image_filename: ""
amp_image_tag: "amphora-image"
amp_ssh_key_name: "octavia-ssh-key"
amp_ssh_key_path: ""
# Convert the amphora image from qcow2 to raw before uploading
# (canonical lowercase boolean instead of the YAML 1.1 "False" form).
amp_to_raw: false
amp_hw_arch: "x86_64"
auth_username: "octavia"
auth_project_name: "service"
# Load-balancer management network layout.
lb_mgmt_net_name: "lb-mgmt-net"
lb_mgmt_subnet_name: "lb-mgmt-subnet"
lb_mgmt_subnet_cidr: "172.24.0.0/16"
lb_mgmt_subnet_gateway: "172.24.0.1"
lb_mgmt_subnet_pool_start: "172.24.0.2"
lb_mgmt_subnet_pool_end: "172.24.255.254"
lb_mgmt_sec_grp_name: "lb-mgmt-sec-grp"
lb_health_mgr_sec_grp_name: "lb-health-mgr-sec-grp"
# Name of the health-manager OVS internal port on each controller.
mgmt_port_dev: "o-hm0"

View File

@ -1,5 +0,0 @@
---
# Flag handler: tasks notify "octavia config updated" whenever they modify
# octavia configuration; the post-config role restarts the containers when
# this fact has been set.
- name: octavia config updated
  set_fact:
    octavia_config_updated: true

View File

@ -1,10 +0,0 @@
---
# Galaxy metadata for the octavia-controller-config role.
dependencies:
  - common

galaxy_info:
  author: Red Hat
  description: octavia-controller-config
  license: Apache
  min_ansible_version: 2.5
  platforms:
    - name: CentOS
    - name: Fedora

View File

@ -1,22 +0,0 @@
---
# Install the CA key/cert and service certificate (generated on the first
# octavia node) into this controller's config-data directory.
- name: making sure octavia worker configuration directory exists
  become: true
  file:
    path: "{{ octavia_confd_prefix }}{{ ca_private_key_path | dirname }}"
    state: directory
    selevel: s0
    setype: svirt_sandbox_file_t

- name: Copying key info to octavia
  become: true
  copy:
    content: "{{ item.content }}"
    dest: "{{ octavia_confd_prefix }}/{{ item.path }}"
    selevel: s0
    setype: svirt_sandbox_file_t
  no_log: true  # item.content carries private key material
  loop:
    - { content: "{{ private_key_content }}", path: "{{ ca_private_key_path }}" }
    - { content: "{{ ca_cert_content }}", path: "{{ ca_cert_path }}" }
    - { content: "{{ service_pem_content }}", path: "{{ client_cert_path }}" }
  notify:
    - octavia config updated

View File

@ -1,25 +0,0 @@
---
# Per-controller Octavia configuration entry point.
- name: gathering controller host name
  command: "hostname -f"
  register: hostname
  changed_when: false

- name: set node_hostname
  set_fact:
    node_hostname: "{{ hostname.stdout }}"

# Distribute the certificates generated on the first node.
- include_tasks: certificate.yml
  when: generate_certs

# Create the health-manager neutron port and its host interface.
- include_tasks: netport.yml
- include_tasks: netinterface.yml

- name: making sure octavia common configuration directory exists
  become: true
  file:
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/common"
    state: directory
    selevel: s0
    setype: svirt_sandbox_file_t

# Write the octavia service configuration overrides.
- include_tasks: octavia.yml

View File

@ -1,20 +0,0 @@
---
# Render the ifcfg files for br-int and the management port, then bring
# the management interface up.
- name: create br-int interface file
  become: true
  become_user: root
  template:
    dest: "/etc/sysconfig/network-scripts/ifcfg-br-int"
    src: "ifcfg-br-int.j2"
    force: no  # never clobber an existing br-int definition

- name: create octavia management interface
  become: true
  become_user: root
  template:
    dest: "/etc/sysconfig/network-scripts/ifcfg-{{ mgmt_port_dev }}"
    src: "ifcfg.j2"

- name: Bring the management port interface up
  become: true
  become_user: root
  command: "ifup {{ mgmt_port_dev }}"
  notify:
    - octavia config updated

View File

@ -1,71 +0,0 @@
---
# Create (idempotently) the per-controller health-manager listen port on
# the management network and collect its ID, MAC, IP, netmask and MTU as
# facts consumed by the ifcfg templates and health-manager configuration.
#
# NOTE(review): port creation still uses the legacy neutron CLI,
# presumably for --binding:host_id support — confirm before porting it to
# "openstack port create".
- name: create management port
  shell: |
    port_id=$(openstack port show octavia-health-manager-{{ node_hostname }}-listen-port -f value -c id 2> /dev/null)
    if [[ $port_id == "" ]]; then
        neutron port-create {{ lb_mgmt_net_name }} --binding:host_id={{ node_hostname }} --no-security-groups --port-security-enabled=False --device-owner Octavia:health-mgr \
            --name octavia-health-manager-{{ node_hostname }}-listen-port -f value -c id
    fi
  register: out_mgmt_port
  changed_when: "out_mgmt_port.stdout != ''"
  notify:
    - octavia config updated
- name: getting management port
  shell: |
    openstack port show octavia-health-manager-{{ node_hostname }}-listen-port -f value -c id
  register: out_mgmt_port_id
  changed_when: false
- name: setting fact for management network controller port ID
  set_fact:
    mgmt_port_id: "{{ out_mgmt_port_id.stdout }}"
- name: get management port mac
  shell: |
    openstack port show {{ mgmt_port_id }} -f value -c mac_address
  register: out_mgmt_port_mac
  changed_when: false
- name: setting fact for management network controller port MAC
  set_fact:
    mgmt_port_mac: "{{ out_mgmt_port_mac.stdout }}"
- name: get management port ip
  shell: |
    set -euo pipefail
    openstack port show {{ mgmt_port_id }} -f value -c fixed_ips | cut -f1 -d, | cut -f2 -d= | tr -d "'"
  register: out_mgmt_port_ip
  changed_when: false
- name: setting fact for management network controller port IP
  set_fact:
    mgmt_port_ip: "{{ out_mgmt_port_ip.stdout }}"
- name: get management port net mask
  shell: |
    set -euo pipefail
    subnet_id=$(openstack port show {{ mgmt_port_id }} -f value -c fixed_ips | cut -f2 -d, | cut -f2 -d= | tr -d "'" 2> /dev/null)
    openstack subnet show $subnet_id -f value -c cidr 2> /dev/null
  register: out_mgmt_subnet_cidr
  # read-only lookup; report unchanged like the sibling queries above
  changed_when: false
- name: setting fact for management subnet cidr
  set_fact:
    mgmt_subnet_cidr: "{{ out_mgmt_subnet_cidr.stdout }}"
- name: setting fact for management network netmask
  set_fact:
    mgmt_port_netmask: "{{ mgmt_subnet_cidr | ipaddr('netmask') }}"
- name: get MTU for management port
  shell: |
    openstack network show {{ lb_mgmt_net_name }} -f value -c mtu
  register: out_mgmt_port_mtu
  # read-only lookup; report unchanged like the sibling queries above
  changed_when: false
- name: setting fact for management port MTU
  set_fact:
    mgmt_port_mtu: "{{ out_mgmt_port_mtu.stdout }}"
- name: creating fact for management network health manager controller IP
  set_fact:
    o_hm_ip: "{{ mgmt_port_ip }}:5555"

View File

@ -1,61 +0,0 @@
---
# Write the octavia-worker and health-manager configuration overrides into
# the container config-data tree. lb_mgmt_net_id / lb_mgmt_secgroup_id are
# computed on the first octavia node and fanned out by the playbook.
- name: making sure octavia worker configuration directory exists
  become: true
  file:
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/octavia-worker"
    state: directory
    selevel: s0
    setype: svirt_sandbox_file_t
- name: setting [controller_worker]/amp_boot_network_list
  become: true
  become_user: root
  ini_file:
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/common/post-deploy.conf"
    section: controller_worker
    option: amp_boot_network_list
    value: "{{ lb_mgmt_net_id }}"
    selevel: s0
    setype: svirt_sandbox_file_t
- name: setting [controller_worker]/amp_secgroup_list
  become: true
  become_user: root
  ini_file:
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/common/post-deploy.conf"
    section: controller_worker
    option: amp_secgroup_list
    value: "{{ lb_mgmt_secgroup_id }}"
    selevel: s0
    setype: svirt_sandbox_file_t
- name: making sure octavia health manager configuration directory exists
  become: true
  file:
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/octavia-health-manager"
    state: directory
    selevel: s0
    setype: svirt_sandbox_file_t
- name: create octavia health manager configuration file
  become: true
  become_user: root
  template:
    dest: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/octavia-health-manager/manager-post-deploy.conf"
    src: "manager-post-deploy.conf.j2"
    selevel: s0
    setype: svirt_sandbox_file_t
- name: gather facts about the service project
  shell: |
    openstack project show "{{ auth_project_name }}" -c id -f value
  register: project_id_result
  # read-only lookup; never report "changed"
  changed_when: false
- name: setting [controller_worker]/amp_image_owner_id
  become: true
  become_user: root
  ini_file:
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/common/post-deploy.conf"
    section: controller_worker
    option: amp_image_owner_id
    value: "{{ project_id_result.stdout }}"

View File

@ -1,6 +0,0 @@
# OVS integration bridge stanza for the octavia management port.
# NOTE(review): assumes br-int itself is created elsewhere (os-net-config /
# neutron); this file only declares it for the ifup tooling -- confirm.
DEVICETYPE=ovs
TYPE=OVSBridge
BOOTPROTO=none
DEVICE=br-int
NM_CONTROLLED=no
ONBOOT=yes

View File

@ -1,19 +0,0 @@
# Octavia health-manager management interface: an OVS internal port on
# br-int, bound to the neutron port created in netport.yml. The OVS_EXTRA
# external-ids mirror what neutron's ovs agent sets so the port is treated
# as an active, cleanup-exempt neutron port.
TYPE=OVSIntPort
OVS_BRIDGE=br-int
DEVICETYPE=ovs
ONBOOT=yes
BOOTPROTO=static
IPV6_AUTOCONF=no
DEVICE={{ mgmt_port_dev }}
IPADDR={{ mgmt_port_ip }}
NETMASK={{ mgmt_port_netmask }}
NM_CONTROLLED=no
MACADDR={{ mgmt_port_mac }}
MTU={{ mgmt_port_mtu }}
OVS_EXTRA="-- set Interface {{ mgmt_port_dev }} external-ids:iface-status=active \
-- set Interface {{ mgmt_port_dev }} external-ids:attached-mac={{ mgmt_port_mac }} \
-- set Interface {{ mgmt_port_dev }} external-ids:iface-id={{mgmt_port_id }} \
-- set Interface {{ mgmt_port_dev }} external-ids:skip_cleanup=true \
-- set Interface {{ mgmt_port_dev }} mac=\"{{ mgmt_port_mac }}\" \
-- set Interface {{ mgmt_port_dev }} other-config:hwaddr={{ mgmt_port_mac }}"

View File

@ -1,2 +0,0 @@
# Per-controller octavia health-manager override: bind the heartbeat
# listener to this node's management-port IP (set in netport.yml).
[health_manager]
bind_ip = {{ mgmt_port_ip }}

View File

@ -1 +0,0 @@
---
# Set to true (via the "octavia config updated" handler) whenever a task
# changes octavia configuration; triggers the container restart in this
# role. Canonical lowercase boolean instead of the truthy "no".
octavia_config_updated: false

View File

@ -1,10 +0,0 @@
---
# Galaxy metadata for the octavia-controller-post-config role.
dependencies:
  - common

galaxy_info:
  author: Red Hat
  description: octavia-controller-post-config
  license: Apache
  min_ansible_version: 2.5
  platforms:
    - name: CentOS
    - name: Fedora

View File

@ -1,49 +0,0 @@
---
# Rebuild the health-manager controller_ip_port_list from every
# controller's o_hm_ip fact and restart the octavia containers when the
# configuration actually changed.
- debug:
    msg: "Octavia services will be restarted because of updated configuration"
  when: octavia_config_updated
- name: create ip list
  set_fact:
    o_hm_ip_list: "{% for octavia_node in groups['octavia_nodes'] %}{{ hostvars[octavia_node].o_hm_ip }}, {%endfor%}"
- name: create ip list (2)
  set_fact:
    # strip the trailing ", " left behind by the loop above
    o_hm_ip_list: "{{ o_hm_ip_list[:-2] }}"
- name: read the current IP list
  become: true
  become_user: root
  shell: |
    awk -F '=' -e '/^controller_ip_port_list/ { print $2; }' "{{ octavia_confd_prefix }}/etc/octavia/conf.d/octavia-worker/worker-post-deploy.conf"
  register: config_contents
  failed_when: config_contents.rc != 0
  changed_when: false
  ignore_errors: true
- name: retrieve current ip list
  set_fact:
    current_ip_list: "{{ config_contents.stdout }}"
# This isn't perfect, as the execution order can produce lists with the
# same members in a different order, but it lets us skip a restart when
# nothing actually changed.
- name: check if ip list needs updating
  set_fact:
    octavia_config_updated: true
  when: current_ip_list != o_hm_ip_list
- name: update octavia worker config file
  become: true
  become_user: root
  ini_file:
    section: "health_manager"
    option: "controller_ip_port_list"
    value: "{{ o_hm_ip_list }}"
    path: "{{ octavia_confd_prefix }}/etc/octavia/conf.d/octavia-worker/worker-post-deploy.conf"
    selevel: s0
    setype: svirt_sandbox_file_t
  when: octavia_config_updated
- name: restart octavia containers
  become: true
  become_user: root
  shell: "{{ container_cli }} restart $(sudo {{ container_cli }} ps -f name=octavia -q)"
  when: octavia_config_updated

View File

@ -1,10 +0,0 @@
---
# Galaxy metadata for the octavia-overcloud-config role.
dependencies:
  - common

galaxy_info:
  author: Red Hat
  description: octavia-overcloud-config
  license: Apache
  min_ansible_version: 2.5
  platforms:
    - name: CentOS
    - name: Fedora

View File

@ -1,84 +0,0 @@
---
- name: delete temporary ssl directory
become: true
file: path={{ openssl_temp_dir }} state=absent
- name: create temporary ssl directories
become: true
file: path={{ openssl_temp_dir }}/private recurse=yes
- name: create temporary ssl newcerts directory
become: true
file: path={{ openssl_temp_dir }}/newcerts recurse=yes
- name: create index.txt
become: true
copy: content="" dest={{ openssl_temp_dir }}/index.txt force=no
- name: create serial file
become: true
copy: content="01" dest={{ openssl_temp_dir }}/serial
- name: create openssl configuration file from template
become: true
copy: src="/etc/pki/tls/openssl.cnf" dest="{{ openssl_temp_dir }}/openssl.cnf" remote_src=yes
- name: update openssl directory entry in the configuration file
become: true
ini_file: path="{{ openssl_temp_dir }}/openssl.cnf" section=" CA_default " option="dir" value="{{ openssl_temp_dir }}"
- name: update openssl ca certificate file in the configuration file
become: true
replace: path="{{ openssl_temp_dir }}/openssl.cnf" regexp="cacert.pem" replace="ca_01.pem"
- name: Generating certificate authority private key
become: true
shell: |
openssl genrsa -passout pass:{{ ca_passphrase }} -des3 -out {{ openssl_temp_dir }}/private/cakey.pem 2048
tags:
- skip_ansible_lint
- name: Reading private key
become: true
shell: cat {{ openssl_temp_dir }}/private/cakey.pem
register: private_key_data
tags:
- skip_ansible_lint
- name: Setting private key fact
set_fact:
private_key_content: "{{ private_key_data.stdout }}"
- name: Generating certificate authority certificate
become: true
shell: |
openssl req -x509 -passin pass:{{ ca_passphrase }} -new -nodes -key {{ openssl_temp_dir }}/private/cakey.pem \
-subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" \
-days 365 -config {{ openssl_temp_dir }}/openssl.cnf \
-out {{ openssl_temp_dir }}/ca_01.pem
tags:
- skip_ansible_lint
- name: Reading CA certificate
become: true
shell: cat {{ openssl_temp_dir }}/ca_01.pem
register: ca_cert_data
tags:
- skip_ansible_lint
- name: Setting CA certificate fact
set_fact:
ca_cert_content: "{{ ca_cert_data.stdout }}"
- name: Generating service private key & certificate request
become: true
shell: |
openssl req -newkey rsa:2048 -nodes -config {{ openssl_temp_dir }}/openssl.cnf -keyout {{ openssl_temp_dir }}/client.key \
-subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" \
-out {{ openssl_temp_dir }}/client.csr
- name: Signing service certificate request
become: true
shell: |
openssl ca -config {{ openssl_temp_dir }}/openssl.cnf -passin pass:{{ ca_passphrase }} -in {{ openssl_temp_dir }}/client.csr \
-days 365 -out {{ openssl_temp_dir }}/client-.pem -batch
- name: Read service private key and public certifcate
become: true
shell: |
cat {{ openssl_temp_dir }}/client-.pem {{ openssl_temp_dir }}/client.key
register: service_key_data
- name: Set service key fact
set_fact:
service_pem_content: "{{ service_key_data.stdout }}"

View File

@ -1,6 +0,0 @@
---
# Overcloud-side octavia resources; previously created as a heat stack in
# the overcloud.
- include_tasks: network.yml
- include_tasks: quotas.yml
- include_tasks: certs_gen.yml
  when: generate_certs

View File

@ -1,93 +0,0 @@
---
# Create the octavia management network, its subnet and the security
# groups/rules used by the amphorae and the health manager. Shell-based
# "show || create" guards keep every task idempotent.
- name: create management network for load balancers
  shell: |
    if [[ $(openstack network show {{ lb_mgmt_net_name }} > /dev/null; echo $?) -eq 1 ]]; then
        openstack network create -f value -c id {{ lb_mgmt_net_name }}
    fi
  register: out_lb_mgmt_net
  changed_when: "out_lb_mgmt_net.stdout != ''"
- name: getting management network ID
  shell: |
    openstack network show {{ lb_mgmt_net_name }} -f value -c id
  register: out_lb_mgmt_net_id
  changed_when: false
- name: setting management network ID fact
  set_fact:
    lb_mgmt_net_id: "{{ out_lb_mgmt_net_id.stdout }}"
- name: create subnet
  shell: |
    if [[ $(openstack subnet show {{ lb_mgmt_subnet_name }} > /dev/null; echo $?) -eq 1 ]]; then
        openstack subnet create {{ lb_mgmt_subnet_name }} \
            --allocation-pool=start={{ lb_mgmt_subnet_pool_start }},end={{ lb_mgmt_subnet_pool_end }} \
            --gateway={{ lb_mgmt_subnet_gateway }} \
            --network {{ lb_mgmt_net_id }} \
            --subnet-range {{ lb_mgmt_subnet_cidr }}
    fi
  register: lb_mgmt_subnet_result
  changed_when: "lb_mgmt_subnet_result.stdout != ''"
- name: create security group  # also retrieves the security group id
  shell: |
    if [[ $(openstack security group show {{ lb_mgmt_sec_grp_name }} > /dev/null; echo $?) -eq 1 ]]; then
        openstack security group create --project service --project-domain Default {{ lb_mgmt_sec_grp_name }} -f value -c id
    else
        openstack security group show {{ lb_mgmt_sec_grp_name }} -f value -c id
    fi
  register: lb_mgmt_secgroup_result
  changed_when: "lb_mgmt_secgroup_result.stdout != ''"
- name: setting fact for management network security group
  set_fact:
    lb_mgmt_secgroup_id: "{{ lb_mgmt_secgroup_result.stdout }}"
- name: create security group rule to open amphora management ssh port
  shell: |
    set -euo pipefail
    if [[ $(openstack security group rule list {{ lb_mgmt_sec_grp_name }} --protocol tcp --ingress -f value 2>&1 | grep "0.0.0.0/0 22:22") == "" ]]; then
        openstack security group rule create --protocol tcp --dst-port 22 {{ lb_mgmt_sec_grp_name }}
    fi
  register: sec_group_rule_one
  changed_when: "sec_group_rule_one.stdout != ''"
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"
- name: create security group rule to open amphora management API port
  shell: |
    set -euo pipefail
    if [[ $(openstack security group rule list {{ lb_mgmt_sec_grp_name }} --protocol tcp --ingress -f value 2>&1 | grep "0.0.0.0/0 9443:9443") == "" ]]; then
        openstack security group rule create --protocol tcp --dst-port 9443 {{ lb_mgmt_sec_grp_name }}
    fi
  register: sec_group_rule_two
  changed_when: "sec_group_rule_two.stdout != ''"
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"
- name: create security group for health manager
  shell: |
    if [[ $(openstack security group show {{ lb_health_mgr_sec_grp_name }} > /dev/null; echo $?) -eq 1 ]]; then
        openstack security group create --project service --project-domain Default {{ lb_health_mgr_sec_grp_name }} -f value -c id
    else
        openstack security group show {{ lb_health_mgr_sec_grp_name }} -f value -c id
    fi
  register: lb_health_manager_sec_grp_result
  changed_when: "lb_health_manager_sec_grp_result.stdout != ''"
- name: create security group rule for health manager
  shell: |
    set -euo pipefail
    if [[ $(openstack security group rule list {{ lb_health_mgr_sec_grp_name }} --protocol udp --ingress -f value 2>&1 | grep "0.0.0.0/0 5555:5555") == "" ]]; then
        openstack security group rule create --protocol udp --dst-port 5555 {{ lb_health_mgr_sec_grp_name }}
    fi
  register: health_mgr_sec_grp_rule
  changed_when: "health_mgr_sec_grp_rule.stdout != ''"
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"

View File

@ -1,4 +0,0 @@
---
# Amphorae are created in the service project; lift its quotas so load
# balancer creation is never blocked by the defaults.
- name: increase quotas for project used for amphora
  shell: |
    openstack quota set --cores -1 --ram -1 --ports -1 --instances -1 --secgroups -1 --secgroup-rules -1 {{ auth_project_name }}

View File

@ -1,10 +0,0 @@
---
# Galaxy metadata for the octavia-undercloud role.
dependencies:
  - common

galaxy_info:
  author: Red Hat
  description: octavia-undercloud
  license: Apache
  min_ansible_version: 2.5
  platforms:
    - name: CentOS
    - name: Fedora

View File

@ -1,118 +0,0 @@
---
# Upload (or replace) the amphora image in glance. The glance image name
# is derived from the image file/symlink name unless amphora_image_name is
# provided. An existing image with a different md5 is renamed aside (with
# a timestamp suffix) before the new one is uploaded.
# NOTE(review): image_file_result is registered by the caller (main.yml)
# before this file is included.
- name: check if name is a symlink
  stat:
    path: "{{ image_filename }}"
  register: symlnk_check
- name: bypass image naming logic if image name is provided (backwards-compatibility)
  set_fact:
    amphora_image: "{{ amphora_image_name }}"
  when: amphora_image_name is defined and not amphora_image_name == ''
- name: set the actual glance image name if it is a symlink
  set_fact:
    amphora_image: "{{ (symlnk_check.stat.lnk_target | basename | splitext)[0] }}"
  when: amphora_image is not defined and symlnk_check is defined and symlnk_check.stat.islnk
- name: set the actual glance image name if it is not a symlink
  set_fact:
    amphora_image: "{{ (image_file_result.stat.path | basename | splitext)[0] }}"
  when: amphora_image is not defined and image_file_result.stat.exists and not symlnk_check.stat.islnk
- name: gather facts about the service project
  shell: |
    openstack project show "{{ auth_project_name }}" -c id -f value
  register: project_id_result
- name: check there's an image in glance already
  shell: |
    openstack image list --property owner={{ project_id_result.stdout }} --private --name {{ amphora_image }} -c ID -f value
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"
  register: glance_id_result
  ignore_errors: true
- name: set image id fact
  set_fact:
    image_id: "{{ glance_id_result.stdout }}"
  when: glance_id_result.rc == 0
- name: get checksum if there's an image in glance already
  shell: |
    openstack image show {{ glance_id_result.stdout }} -c checksum -f value
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"
  when: image_id is defined
  register: glance_results
  ignore_errors: true
- name: set current_md5 fact from glance if image already exists there
  set_fact:
    current_md5: "{{ glance_results.stdout }}"
  when: glance_results.rc == 0
- name: determine if the image needs to be replaced
  set_fact:
    replace_image: "{{ current_md5 != image_file_result.stat.md5 }}"
  when: current_md5 is defined
- name: move existing image if the names match and the md5s are not the same
  shell: |
    ts=`openstack image show {{ image_id }} -f value -c created_at`
    ts=${ts//:/}
    ts=${ts//-/}
    openstack image set {{ image_id }} --name "{{ amphora_image }}_$ts"
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"
  when: replace_image is defined and replace_image
- name: decide whether to upload new image
  set_fact:
    upload_image: true
  when: current_md5 is not defined or replace_image
# Optional qcow2 -> raw conversion in a scratch directory.
- block:
    - name: create temporary directory
      tempfile:
        state: directory
      register: amp_tmp_dir
    - name: set RAW file name
      set_fact:
        raw_filename: "{{ amp_tmp_dir.path }}/{{ image_filename|splitext|first|basename }}.img"
    - name: convert image from qcow2 to raw
      shell: |
        qemu-img convert -f qcow2 -O raw {{ image_filename }} {{ raw_filename }}
    - name: setting amphora format to raw
      set_fact:
        raw_format: raw
  when: amp_to_raw
- name: upload image to glance
  shell: |
    openstack image create --disk-format {{ raw_format|default('qcow2') }} \
        --container-format bare --tag {{ amp_image_tag }} \
        --file {{ raw_filename|default(image_filename) }} \
        --property hw_architecture={{ amp_hw_arch }} \
        --private {{ amphora_image }}
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"
  register: image_result
  changed_when: "image_result.stdout != ''"
  when: image_file_result.stat.exists and upload_image is defined
- name: delete converted raw image
  file:
    path: "{{ amp_tmp_dir.path }}"
    state: absent
  when: amp_to_raw

View File

@ -1,65 +0,0 @@
---
# Locate the amphora image on the undercloud, upload it to glance when
# present (image_mgmt.yml), and make sure the amphora SSH keypair exists
# in the overcloud.
- name: set file if already set by heat variable (backwards-compatibility)
  set_fact:
    image_filename: "{{ amp_image_filename }}"
  when: amp_image_filename is defined and not amp_image_filename == ''
- name: set location if CentOS
  set_fact:
    image_filename: "/usr/share/openstack-octavia-amphora-images/amphora-x64-haproxy.qcow2"
  when: ansible_distribution == 'CentOS' and not image_filename is defined
- block:
    - name: set location if Red Hat
      set_fact:
        image_filename: "/usr/share/openstack-octavia-amphora-images/octavia-amphora.qcow2"
      when: ansible_distribution == 'RedHat' and not image_filename is defined
    - name: check if amphora image file exists
      stat:
        path: "{{ image_filename }}"
        follow: true
        get_md5: true
      register: image_file_result
    - include_tasks: image_mgmt.yml
      when: image_file_result.stat.exists
- name: use ssh pub key file if provided and is readable
  block:
    - name: check if pub key file exists
      stat:
        path: "{{ amp_ssh_key_path }}"
      register: key_file_result
      ignore_errors: true
    - name: fail if ssh pub key file does not exist or is not readable
      fail:
        msg: "{{ amp_ssh_key_path }} does not exist or is not readable by user {{ ansible_user }}"
      # "is failed" test syntax: the "|failed" filter form was deprecated
      # in Ansible 2.5 and removed in 2.9
      when: key_file_result is failed or not key_file_result.stat.exists or not key_file_result.stat.readable
    - name: set amp_ssh_key_path_final
      set_fact:
        amp_ssh_key_path_final: "{{ amp_ssh_key_path }}"
  when: amp_ssh_key_path is defined and amp_ssh_key_path != ""
- name: defaulting to public key from undercloud default keypair
  block:
    - name: create temp pub key file
      tempfile:
        state: file
      register: ssh_key_tmp_file
    - name: copy ssh public key content to temp file
      copy:
        content: "{{ amp_ssh_key_data }}"
        dest: "{{ ssh_key_tmp_file.path }}"
    - name: set amp_ssh_key_path_final
      set_fact:
        amp_ssh_key_path_final: "{{ ssh_key_tmp_file.path }}"
  when: amp_ssh_key_path is not defined or amp_ssh_key_path == ""
- name: upload pub key to overcloud
  shell: |
    openstack keypair show {{ amp_ssh_key_name }} || \
    openstack keypair create --public-key {{ amp_ssh_key_path_final }} {{ amp_ssh_key_name }}
  environment:
    OS_USERNAME: "{{ auth_username }}"
    OS_PASSWORD: "{{ auth_password }}"
    OS_PROJECT_NAME: "{{ auth_project_name }}"

View File

@ -1,80 +0,0 @@
---
# Rotate the keystone fernet keys on the overcloud. Handles both the
# containerized deployment (config-data bind mount + container restart)
# and the legacy bare-metal one (httpd reload). "fernet_keys" is supplied
# by the calling workflow as {path: {content: ...}} entries.
- hosts: keystone
  tasks:
    - name: Check for containerized keystone fernet repository
      stat:
        path: /var/lib/config-data/puppet-generated/keystone/etc/keystone/fernet-keys/
      register: containerized_keystone_dir
    - name: populate service facts
      service_facts:
    # NOTE(review): these facts are plain strings that "when:" later
    # evaluates as Jinja expressions; do not wrap them in "{{ }}" here.
    - name: Determine facts
      set_fact:
        is_container: containerized_keystone_dir.stat.isdir is defined and containerized_keystone_dir.stat.isdir
        podman_enabled: '"tripleo_keystone.service" in ansible_facts.services'
    - name: Rotate fernet keys for keystone container
      block:
        - name: set keystone_base
          set_fact:
            keystone_base: /var/lib/config-data/puppet-generated/keystone
        - name: Remove previous fernet keys
          shell: rm -rf /var/lib/config-data/puppet-generated/keystone/etc/keystone/fernet-keys/*
          args:
            warn: false
        - name: Persist fernet keys to repository
          copy:
            dest: "{{ keystone_base }}{{ item.key }}"
            content: "{{ item.value.content }}"
            # quoted so YAML does not reinterpret the octal literal
            mode: "0600"
          with_dict: "{{ fernet_keys }}"
          no_log: true
        - name: Set permissions to match container's user
          shell: chown --reference={{ keystone_base }}/etc/keystone/fernet-keys {{ keystone_base }}{{ item.key }}
          args:
            warn: false
          with_dict: "{{ fernet_keys }}"
          no_log: true
          tags:
            - skip_ansible_lint
        - name: Restart keystone container with docker
          shell: docker restart keystone
          when: not podman_enabled
          tags:
            - skip_ansible_lint
        - name: Restart keystone container
          service:
            name: tripleo_keystone
            state: restarted
          when: podman_enabled
      when: is_container
    - name: Rotate fernet keys for keystone (no container)
      block:
        - name: Remove previous fernet keys
          shell: rm -rf /etc/keystone/fernet-keys/*
          args:
            warn: false
        - name: Persist fernet keys to repository
          copy:
            dest: "{{ item.key }}"
            content: "{{ item.value.content }}"
            mode: "0600"
            owner: keystone
            group: keystone
          with_dict: "{{ fernet_keys }}"
          no_log: true
        - name: Reload apache
          service:
            name: httpd
            state: reloaded
      when: not is_container

View File

@ -1,126 +0,0 @@
---
# Safely rebalance the Swift rings stored on the undercloud and distribute
# the result to every storage/proxy node. Aborts when local rings diverge
# from the reference copy or when replication has not caught up yet.
- name: Update Swift rings
  hosts: swift_storage,swift_proxy
  gather_facts: false
  any_errors_fatal: true
  max_fail_percentage: 0
  vars:
    base_directory: "/var/lib/config-data/puppet-generated/swift/"
    rebalance_is_safe: false
  environment:
    OS_STORAGE_URL: "{{ hostvars.localhost.undercloud_swift_url }}"
    OS_AUTH_TOKEN: "{{ hostvars.localhost.os_auth_token }}"
  tasks:
    - name: Get reference ring checksum
      run_once: true
      block:
        - name: Ensure /tmp/swift-rings directory exists
          file:
            path: /tmp/swift-rings
            state: directory
        - name: Fetch Swift rings from undercloud
          command: swift --insecure download -o /tmp/swift-rings.tar.gz overcloud-swift-rings swift-rings.tar.gz
        - name: Extract Swift rings
          unarchive:
            src: /tmp/swift-rings.tar.gz
            dest: /tmp/swift-rings
            remote_src: yes
        - name: Get reference ring checksum
          stat:
            path: /tmp/swift-rings/etc/swift/object.ring.gz
          register: result_reference
    - name: Get file attributes of object rings
      stat:
        path: "{{ base_directory }}/etc/swift/object.ring.gz"
      register: result
    - name: Abort playbook run if consistency check fails
      fail:
        msg: "object.ring.gz does not match reference checksum"
      when:
        - result.stat.exists
        - result_reference.stat.exists
        - result_reference.stat.checksum != result.stat.checksum
    - name: Deploy missing Swift rings
      when: not result.stat.exists
      block:
        - name: Fetch missing Swift rings from undercloud
          command: swift --insecure download -o /tmp/swift-rings.tar.gz overcloud-swift-rings swift-rings.tar.gz
        - name: Extract missing Swift rings
          unarchive:
            src: /tmp/swift-rings.tar.gz
            dest: /{{ base_directory }}
            remote_src: yes
          become: true
    - name: Get recon data
      command: cat /var/cache/swift/object.recon
      register: recon
      become: true
    - name: Check if it is safe to continue rebalancing
      set_fact:
        rebalance_is_safe: true
      when:
        - result.stat.exists
        - ((recon.stdout | from_json).object_replication_last | int) > ((result.stat.mtime) | int)
    - name: Show warning and stop playbook run if unsafe
      debug:
        msg: "Rebalancing is unsafe at the moment, stopping. Please try again later"
      when: not rebalance_is_safe
    # We exit here in case there is at least one host that fails the above check
    - meta: end_play
      when: not rebalance_is_safe
    - name: Rebalance Swift rings
      run_once: true
      block:
        - name: Ensure /tmp/swift-rings directory exists
          file:
            path: /tmp/swift-rings
            state: directory
        - name: Fetch Swift rings from undercloud
          command: swift --insecure download -o /tmp/swift-rings.tar.gz overcloud-swift-rings swift-rings.tar.gz
        - name: Extract Swift rings
          unarchive:
            src: /tmp/swift-rings.tar.gz
            dest: /tmp/swift-rings
            remote_src: yes
        # Can't use with_fileglob (see https://github.com/ansible/ansible/issues/17136)
        - name: Rebalance Swift rings
          command: swift-ring-builder /tmp/swift-rings/etc/swift/{{ item }} rebalance
          with_items:
            - object.builder
            - container.builder
            - account.builder
          failed_when: result.rc > 1
          register: result
        - name: Create Swift ring archive
          archive:
            path:
              - "/tmp/swift-rings/etc"
            dest: /tmp/swift-rings.tar.gz
        - name: Copy Swift rings to the undercloud
          command: swift --insecure upload --object-name swift-rings.tar.gz overcloud-swift-rings /tmp/swift-rings.tar.gz
    - name: Update Swift rings on all nodes
      block:
        - name: Fetch Swift rings from undercloud
          command: swift --insecure download -o /tmp/swift-rings.tar.gz overcloud-swift-rings swift-rings.tar.gz
        - name: Extract Swift rings
          unarchive:
            src: /tmp/swift-rings.tar.gz
            dest: /{{ base_directory }}
            remote_src: yes
          become: true

View File

@ -41,7 +41,6 @@ data_files =
share/tripleo-common = sudoers
share/tripleo-common/container-images = container-images/*
share/tripleo-common/image-yaml = image-yaml/*
share/tripleo-common/playbooks = playbooks/*
share/tripleo-common/workbooks = workbooks/*
share/tripleo-common/healthcheck = healthcheck/*
share/ansible/roles/ = roles/*

View File

@ -48,7 +48,7 @@ workflows:
extra_vars:
fernet_keys: <% task(rotate_keys).result %>
use_openstack_credentials: true
playbook: /usr/share/tripleo-common/playbooks/rotate-keys.yaml
playbook: /usr/share/tripleo-ansible/playbooks/rotate-keys.yaml
execution_id: <% execution().id %>
on-success: send_message
publish:

View File

@ -28,7 +28,7 @@ workflows:
remote_user: tripleo-admin
become: true
become_user: root
playbook: /usr/share/tripleo-common/playbooks/swift_ring_rebalance.yaml
playbook: /usr/share/tripleo-ansible/playbooks/swift_ring_rebalance.yaml
inventory: <% $.get('work_dir') %>/<% $.get('container') %>/tripleo-ansible-inventory.yaml
use_openstack_credentials: true
execution_id: <% execution().id %>