Merge "standalone/overcloud: enable the HA deployment by default"

This commit is contained in:
Zuul 2020-04-10 19:10:02 +00:00 committed by Gerrit Code Review
commit 4f8fcca622
27 changed files with 218 additions and 49 deletions

View File

@@ -4,7 +4,7 @@ resource_registry:
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
OS::TripleO::Services::OVNDBs: ../../deployment/ovn/ovn-dbs-container-puppet.yaml
OS::TripleO::Services::OVNDBs: ../../deployment/ovn/ovn-dbs-pacemaker-puppet.yaml
parameter_defaults:
ControllerServices:
@@ -24,7 +24,6 @@ parameter_defaults:
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
@@ -44,6 +43,8 @@ parameter_defaults:
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Multipathd
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::Clustercheck
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
@@ -51,3 +52,4 @@ parameter_defaults:
DockerPuppetDebug: True
NotificationDriver: 'noop'
GlanceBackend: 'file'
ContainerCli: podman

View File

@@ -5,8 +5,13 @@ resource_registry:
# Since the OVB jobs also test this functionality we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
# HA isn't enabled yet on this scenario, it seems to cause issues since HA is
# enabled on one node.
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-container-puppet.yaml
OS::TripleO::Services::MySQL: ../../deployment/database/mysql-container-puppet.yaml
OS::TripleO::Services::Keepalived: ../../deployment/keepalived/keepalived-container-puppet.yaml
parameter_defaults:
ControllerServices:

View File

@@ -28,7 +28,6 @@ resource_registry:
OS::TripleO::Services::HeatApiCfn: ../../deployment/heat/heat-api-cfn-container-puppet.yaml
OS::TripleO::Services::HeatApiCloudwatch: ../../deployment/heat/heat-api-cloudwatch-disabled-puppet.yaml
OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml
#Needs to run scenario001
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
@@ -56,7 +55,6 @@ parameter_defaults:
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor

View File

@@ -20,12 +20,12 @@ resource_registry:
OS::TripleO::Services::HeatApiCloudwatch: ../../deployment/heat/heat-api-cloudwatch-disabled-puppet.yaml
OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml
OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::Rear: ../../deployment/backup-and-restore/rear-baremetal-ansible.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-container-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-container-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml

View File

@@ -15,11 +15,11 @@ resource_registry:
OS::TripleO::Services::HeatEngine: ../../deployment/heat/heat-engine-container-puppet.yaml
OS::TripleO::Services::BarbicanApi: ../../deployment/barbican/barbican-api-container-puppet.yaml
OS::TripleO::Services::Zaqar: ../../deployment/zaqar/zaqar-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-container-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-container-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-container-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::BarbicanBackendSimpleCrypto: ../../deployment/barbican/barbican-backend-simple-crypto-puppet.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.

View File

@@ -13,7 +13,7 @@ resource_registry:
OS::TripleO::Services::MistralExecutor: ../../deployment/mistral/mistral-executor-container-puppet.yaml
OS::TripleO::Services::MistralEventEngine: ../../deployment/mistral/mistral-event-engine-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/messaging/rpc-qdrouterd-container-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-container-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::DesignateApi: ../../deployment/experimental/designate/designate-api-container-puppet.yaml
OS::TripleO::Services::DesignateCentral: ../../deployment/experimental/designate/designate-central-container-puppet.yaml
OS::TripleO::Services::DesignateProducer: ../../deployment/experimental/designate/designate-producer-container-puppet.yaml

View File

@@ -94,8 +94,5 @@ parameter_defaults:
- tty4
- tty5
- tty6
# Remove ContainerCli and EnablePaunch once this scenario is tested on CentOS8
ContainerCli: docker
EnablePaunch: true
CephConfigOverrides:
globalkey: globalvalue

View File

@@ -50,7 +50,6 @@ parameter_defaults:
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor

View File

@@ -10,7 +10,6 @@ resource_registry:
OS::TripleO::Services::PacemakerRemote: ../../deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml
OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml
OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml
OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml
@@ -33,6 +32,7 @@ parameter_defaults:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
@@ -51,7 +51,6 @@ parameter_defaults:
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
@@ -122,5 +121,4 @@ parameter_defaults:
# For now, we hardcode it but soon it'll be generated in tripleo-common
OctaviaCaKeyPassphrase: 'upstreamci'
OctaviaGenerateCerts: true
# Remove ContainerCli once this scenario is tested on CentOS8
ContainerCli: docker
ContainerCli: podman

View File

@@ -4,7 +4,7 @@ resource_registry:
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.

View File

@@ -236,6 +236,17 @@ outputs:
container_image: {get_param: ContainerCinderBackupImage}
container_image_latest: *cinder_backup_image_pcmklatest
update_tasks:
- name: Tear-down non-HA cinder-backup container
when:
- step|int == 1
block: &cinder_backup_teardown_nonha
- name: Remove non-HA cinder-backup container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- cinder_backup
- name: cinder_backup fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_backup_fetch_retag_container_tasks
@@ -262,6 +273,10 @@ outputs:
- old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA cinder_backup container
when:
- step|int == 0
block: *cinder_backup_teardown_nonha
- name: Prepare switch of cinder_backup image name
when:
- step|int == 0
@@ -343,7 +358,7 @@ outputs:
when:
- groups['cinder_backup'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ cinder_backup_node_names }}"
loop: "{{ cinder_backup_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade cinder_backup without

View File

@@ -223,6 +223,17 @@ outputs:
container_image_latest: *cinder_volume_image_pcmklatest
update_tasks:
- name: Tear-down non-HA cinder_volume container
when:
- step|int == 1
block: &cinder_volume_teardown_nonha
- name: Remove non-HA cinder_volume container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- cinder_volume
- name: cinder_volume fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_volume_fetch_retag_container_tasks
@@ -249,6 +260,10 @@ outputs:
- old_cinder_volume_image_id.stdout != new_cinder_volume_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA cinder_volume container
when:
- step|int == 0
block: *cinder_volume_teardown_nonha
- name: Prepare switch of cinder_volume image name
when:
- step|int == 0
@@ -330,7 +345,7 @@ outputs:
when:
- groups['cinder_volume'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ cinder_volume_node_names }}"
loop: "{{ cinder_volume_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade cinder_volume without

View File

@@ -329,6 +329,37 @@ outputs:
container_image_latest: *mysql_image_pcmklatest
update_tasks:
- name: Tear-down non-HA mysql container
when:
- step|int == 1
block: &mysql_teardown_nonha
- name: "stat mysql container"
command: "podman container exists mysql"
failed_when: false
changed_when: false
register: stat_mysql_container
- name: Create clustercheck user and permissions
command:
argv: "{{ mysql_exec_data | container_exec_cmd }}"
changed_when: true
when:
- stat_mysql_container.rc == 0
vars:
mysql_exec_data:
environment:
CLUSTERCHECK_PASSWORD: {get_param: MysqlClustercheckPassword}
command:
- 'mysql'
- '/bin/sh'
- '-c'
- mysql -e "CREATE USER IF NOT EXISTS 'clustercheck'@'localhost' IDENTIFIED BY '${CLUSTERCHECK_PASSWORD}'; GRANT PROCESS ON *.* TO 'clustercheck'@'localhost' WITH GRANT OPTION;"
- name: Remove non-HA mysql container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- mysql
- name: Mariadb fetch and retag container image for pacemaker
when: step|int == 2
block: &mysql_fetch_retag_container_tasks
@@ -355,6 +386,10 @@ outputs:
- old_galera_image_id.stdout != new_galera_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA mysql container
when:
- step|int == 0
block: *mysql_teardown_nonha
- vars:
mysql_upgrade_persist: {get_param: MysqlUpgradePersist}
when:
@@ -483,7 +518,7 @@ outputs:
when:
- groups['mysql'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ mysql_node_names }}"
loop: "{{ mysql_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade galera without staged upgrade.

View File

@@ -306,6 +306,17 @@ outputs:
container_image: {get_param: ContainerRedisImage}
container_image_latest: *redis_image_pcmklatest
update_tasks:
- name: Tear-down non-HA redis container
when:
- step|int == 1
block: &redis_teardown_nonha
- name: Remove non-HA redis container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- redis
- name: Redis fetch and retag container image for pacemaker
when: step|int == 2
block: &redis_fetch_retag_container_tasks
@@ -332,6 +343,10 @@ outputs:
- old_redis_image_id.stdout != new_redis_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA redis container
when:
- step|int == 0
block: *redis_teardown_nonha
- name: Prepare switch of redis image name
when:
- step|int == 0
@@ -414,7 +429,7 @@ outputs:
when:
- groups['redis'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ redis_short_node_names }}"
loop: "{{ redis_short_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade redis without staged

View File

@@ -365,6 +365,17 @@ outputs:
container_image: {get_param: ContainerHAProxyImage}
container_image_latest: *haproxy_image_pcmklatest
update_tasks:
- name: Tear-down non-HA haproxy container
when:
- step|int == 1
block: &haproxy_teardown_nonha
- name: Remove non-HA haproxy container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- haproxy
- name: Set HAProxy upgrade facts
block: &haproxy_update_upgrade_facts
- name: set is_haproxy_bootstrap_node fact
@@ -432,6 +443,10 @@ outputs:
- old_haproxy_image_id.stdout != new_haproxy_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA haproxy container
when:
- step|int == 0
block: *haproxy_teardown_nonha
- name: Prepare switch of haproxy image name
when:
- step|int == 0
@@ -533,7 +548,7 @@ outputs:
when:
- groups['haproxy'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ haproxy_short_node_names }}"
loop: "{{ haproxy_short_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade haproxy without staged

View File

@@ -323,7 +323,7 @@ outputs:
when:
- groups['manila_share'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ manila_share_node_names }}"
loop: "{{ manila_share_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade manila_share without

View File

@@ -284,6 +284,19 @@ outputs:
container_image_latest: *ovn_dbs_image_pcmklatest
update_tasks:
- name: Tear-down non-HA ovn-dbs containers
when:
- step|int == 1
block: &ovn_dbs_teardown_nonha
- name: Remove non-HA ovn-dbs containers
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- ovn_north_db_server
- ovn_south_db_server
- ovn_northd
# When a schema change happens, the newer slaves don't connect
# back to the older master and end up timing out. So we clean
# up the error here until we get a fix for
@@ -357,6 +370,10 @@ outputs:
- ovn_dbs_current_image.stdout != ovn_dbs_image_latest
upgrade_tasks:
- name: Tear-down non-HA ovn-dbs container
when:
- step|int == 0
block: *ovn_dbs_teardown_nonha
- name: Prepare switch of ovn-dbs image name
when:
- step|int == 0
@@ -437,7 +454,7 @@ outputs:
when:
- groups['ovn_dbs'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ ovn_dbs_short_node_names }}"
loop: "{{ ovn_dbs_short_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade ovn_dbs without staged

View File

@@ -222,7 +222,7 @@ outputs:
when:
- groups['pacemaker'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ pacemaker_short_node_names }}"
loop: "{{ pacemaker_short_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade pacemaker without staged

View File

@@ -265,6 +265,17 @@ outputs:
container_image_latest: *rabbitmq_image_pcmklatest
update_tasks:
- name: Tear-down non-HA rabbitmq container
when:
- step|int == 1
block: &rabbitmq_teardown_nonha
- name: Remove non-HA rabbitmq container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- rabbitmq
- name: Rabbit fetch and retag container image for pacemaker
when: step|int == 2
block: &rabbitmq_fetch_retag_container_tasks
@@ -291,6 +302,10 @@ outputs:
- old_rabbitmq_image_id.stdout != new_rabbitmq_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA rabbitmq container
when:
- step|int == 0
block: *rabbitmq_teardown_nonha
- name: Prepare switch of rabbitmq image name
when:
- step|int == 0
@@ -377,7 +392,7 @@ outputs:
when:
- groups['oslo_messaging_notify'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ oslo_messaging_notify_node_names }}"
loop: "{{ oslo_messaging_notify_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade oslo_messaging_notify without

View File

@@ -265,6 +265,17 @@ outputs:
container_image_latest: *rabbitmq_image_pcmklatest
update_tasks:
- name: Tear-down non-HA rabbitmq container
when:
- step|int == 1
block: &rabbitmq_teardown_nonha
- name: Remove non-HA rabbitmq container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- rabbitmq
- name: Rabbit fetch and retag container image for pacemaker
when: step|int == 2
block: &rabbitmq_fetch_retag_container_tasks
@@ -291,6 +302,10 @@ outputs:
- old_rabbitmq_image_id.stdout != new_rabbitmq_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA rabbitmq container
when:
- step|int == 0
block: *rabbitmq_teardown_nonha
- name: Prepare switch of rabbitmq image name
when:
- step|int == 0
@@ -376,7 +391,7 @@ outputs:
when:
- groups['rabbitmq'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ rabbitmq_short_node_names }}"
loop: "{{ rabbitmq_short_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade rabbitmq without staged

View File

@@ -271,6 +271,17 @@ outputs:
container_image_latest: *rabbitmq_image_pcmklatest
update_tasks:
- name: Tear-down non-HA rabbitmq container
when:
- step|int == 1
block: &rabbitmq_teardown_nonha
- name: Remove non-HA rabbitmq container
include_role:
name: tripleo_container_rm
vars:
tripleo_container_cli: "{{ container_cli }}"
tripleo_containers_to_rm:
- rabbitmq
- name: Rabbit fetch and retag container image for pacemaker
when: step|int == 2
block: &rabbitmq_fetch_retag_container_tasks
@@ -297,6 +308,10 @@ outputs:
- old_rabbitmq_image_id.stdout != new_rabbitmq_image_id.stdout
upgrade_tasks:
- name: Tear-down non-HA rabbitmq container
when:
- step|int == 0
block: *rabbitmq_teardown_nonha
- name: Prepare switch of rabbitmq image name
when:
- step|int == 0
@@ -383,7 +398,7 @@ outputs:
when:
- groups['oslo_messaging_rpc'] | length > 1
- item.split('.')[0] in ansible_limit.split(',')
loop: "{{ oslo_messaging_rpc_node_names }}"
loop: "{{ oslo_messaging_rpc_node_names | default([]) }}"
- fail:
msg: >
You can't upgrade oslo_messaging_rpc without

View File

@@ -80,7 +80,6 @@ resource_registry:
OS::TripleO::Services::GnocchiApi: OS::Heat::None
OS::TripleO::Services::GnocchiMetricd: OS::Heat::None
OS::TripleO::Services::GnocchiStatsd: OS::Heat::None
OS::TripleO::Services::HAproxy: OS::Heat::None
OS::TripleO::Services::HeatApi: OS::Heat::None
OS::TripleO::Services::HeatApiCfn: OS::Heat::None
OS::TripleO::Services::HeatApiCloudwatch: OS::Heat::None
@@ -105,7 +104,7 @@ resource_registry:
OS::TripleO::Services::MistralEngine: OS::Heat::None
OS::TripleO::Services::MistralEventEngine: OS::Heat::None
OS::TripleO::Services::MistralExecutor: OS::Heat::None
OS::TripleO::Services::OVNDBs: ../../deployment/ovn/ovn-dbs-container-puppet.yaml
OS::TripleO::Services::OVNDBs: ../../deployment/ovn/ovn-dbs-pacemaker-puppet.yaml
OS::TripleO::Services::OpenStackClients: ../../deployment/clients/openstack-clients-baremetal-ansible.yaml
OS::TripleO::Services::Podman: ../../deployment/podman/podman-baremetal-ansible.yaml
OS::TripleO::Services::Rear: OS::Heat::None

View File

@@ -120,8 +120,8 @@ resource_registry:
OS::TripleO::Services::CinderApi: deployment/cinder/cinder-api-container-puppet.yaml
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: deployment/cinder/cinder-scheduler-container-puppet.yaml
OS::TripleO::Services::CinderVolume: deployment/cinder/cinder-volume-container-puppet.yaml
OS::TripleO::Services::BlockStorageCinderVolume: deployment/cinder/cinder-volume-container-puppet.yaml
OS::TripleO::Services::CinderVolume: deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::BlockStorageCinderVolume: deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::Keystone: deployment/keystone/keystone-container-puppet.yaml
OS::TripleO::Services::GlanceApi: deployment/glance/glance-api-container-puppet.yaml
OS::TripleO::Services::HeatApi: deployment/heat/heat-api-container-puppet.yaml
@@ -129,7 +129,7 @@ resource_registry:
OS::TripleO::Services::HeatApiCloudwatch: deployment/heat/heat-api-cloudwatch-disabled-puppet.yaml
OS::TripleO::Services::HeatEngine: deployment/heat/heat-engine-container-puppet.yaml
OS::TripleO::Services::Kernel: deployment/kernel/kernel-baremetal-ansible.yaml
OS::TripleO::Services::MySQL: deployment/database/mysql-container-puppet.yaml
OS::TripleO::Services::MySQL: deployment/database/mysql-pacemaker-puppet.yaml
OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None
OS::TripleO::Services::NeutronBgpVpnBagpipe: OS::Heat::None
OS::TripleO::Services::NeutronSfcApi: OS::Heat::None
@@ -150,7 +150,7 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginNuage: deployment/neutron/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginML2Nuage: deployment/neutron/neutron-plugin-ml2-nuage.yaml
OS::TripleO::Services::NeutronCorePluginNSX: deployment/neutron/neutron-plugin-nsx-container-puppet.yaml
OS::TripleO::Services::OVNDBs: deployment/ovn/ovn-dbs-container-puppet.yaml
OS::TripleO::Services::OVNDBs: deployment/ovn/ovn-dbs-pacemaker-puppet.yaml
OS::TripleO::Services::OVNController: deployment/ovn/ovn-controller-container-puppet.yaml
OS::TripleO::Services::OvsDpdkNetcontrold: OS::Heat::None
@@ -162,22 +162,22 @@ resource_registry:
OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsDpdk: OS::Heat::None
OS::TripleO::Services::Pacemaker: OS::Heat::None
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::Pacemaker: deployment/pacemaker/pacemaker-baremetal-puppet.yaml
OS::TripleO::Services::PacemakerRemote: deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::NeutronSriovHostConfig: OS::Heat::None
OS::TripleO::Services::NeutronMlnxAgent: OS::Heat::None
OS::TripleO::Services::NeutronAgentsIBConfig: OS::Heat::None
OS::TripleO::Services::OsloMessagingRpc: deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::RabbitMQ: OS::Heat::None
OS::TripleO::Services::Rear: OS::Heat::None
OS::TripleO::Services::Qdr: OS::Heat::None
OS::TripleO::Services::HAproxy: deployment/haproxy/haproxy-container-puppet.yaml
OS::TripleO::Services::HAproxy: deployment/haproxy/haproxy-pacemaker-puppet.yaml
OS::TripleO::Services::HAProxyPublicTLS: deployment/haproxy/haproxy-public-tls-inject.yaml
OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
OS::TripleO::Services::Iscsid: deployment/iscsid/iscsid-container-puppet.yaml
OS::TripleO::Services::Keepalived: deployment/keepalived/keepalived-container-puppet.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
OS::TripleO::Services::Memcached: deployment/memcached/memcached-container-puppet.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
@@ -186,7 +186,7 @@ resource_registry:
# TODO(aschultz): Remove this in U as we switched to a task in the deploy
OS::TripleO::Services::SELinux: OS::Heat::None
OS::TripleO::Services::Sshd: deployment/sshd/sshd-baremetal-puppet.yaml
OS::TripleO::Services::Redis: OS::Heat::None
OS::TripleO::Services::Redis: deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::NovaApi: deployment/nova/nova-api-container-puppet.yaml
OS::TripleO::Services::NovaCompute: deployment/nova/nova-compute-container-puppet.yaml
OS::TripleO::Services::NovaConductor: deployment/nova/nova-conductor-container-puppet.yaml
@@ -299,7 +299,7 @@ resource_registry:
OS::TripleO::Services::DockerRegistry: OS::Heat::None
OS::TripleO::Services::ContainerImagePrepare: deployment/container-image-prepare/container-image-prepare-baremetal-ansible.yaml
OS::TripleO::Services::CertmongerUser: deployment/certs/certmonger-user-baremetal-puppet.yaml
OS::TripleO::Services::Clustercheck: OS::Heat::None
OS::TripleO::Services::Clustercheck: deployment/pacemaker/clustercheck-container-puppet.yaml
OS::TripleO::Services::Rsyslog: OS::Heat::None
OS::TripleO::Services::RsyslogSidecar: OS::Heat::None
OS::TripleO::Services::VRTSHyperScale: OS::Heat::None

View File

@@ -0,0 +1,6 @@
---
other:
- |
Pacemaker is now deployed by default on the Overcloud and Standalone
deployments. It has become the de-facto service to handle services in HA
and also Virtual IPs.

View File

@@ -60,7 +60,7 @@ environments:
OS::TripleO::Standalone::Net::SoftwareConfig: ../../net-config-standalone.yaml
# OVN
OS::TripleO::Services::OVNDBs: ../../deployment/ovn/ovn-dbs-container-puppet.yaml
OS::TripleO::Services::OVNDBs: ../../deployment/ovn/ovn-dbs-pacemaker-puppet.yaml
OS::TripleO::Services::OpenStackClients: ../../deployment/clients/openstack-clients-baremetal-ansible.yaml
@@ -75,7 +75,6 @@ environments:
OS::TripleO::Services::Tmpwatch: ../../deployment/logrotate/tmpwatch-install.yaml
# Disable non-openstack services that are enabled by default
OS::TripleO::Services::HAproxy: OS::Heat::None
OS::TripleO::Services::Keepalived: OS::Heat::None
# Aodh

View File

@@ -21,7 +21,8 @@ role_paths = [
]
module_paths = [
'tripleo-ansible/tripleo-ansible/tripleo_ansible/ansible_plugins/modules'
'tripleo-ansible/tripleo-ansible/tripleo_ansible/ansible_plugins/modules',
'ansible-pacemaker/modules'
]
@@ -50,6 +51,9 @@ def test_tht_ansible_syntax(pytestconfig):
os.environ["ANSIBLE_ROLES_PATH"] = role_path
os.environ["ANSIBLE_LIBRARY"] = mod_path
# Some variables are generated by config-download and part of TripleO inventory,
# absent from this testing
os.environ["ANSIBLE_ERROR_ON_UNDEFINED_VARS"] = "False"
run = ansible_runner.run(
playbook=play_path,

View File

@@ -61,6 +61,11 @@
-fr
{{ tht_root }}/tripleo_heat_templates/tests/tht-role-requirements.yml
- name: Get ansible-pacemaker
git:
repo: https://github.com/redhat-openstack/ansible-pacemaker.git
dest: "{{ tht_root }}/tripleo_heat_templates/tests/ansible-pacemaker"
- name: Create temp folder
file:
state: directory