From 8067adc6dc2e3a98a6e31759eb5fbeb02b50d299 Mon Sep 17 00:00:00 2001
From: Brent Eagles
Date: Thu, 10 Oct 2019 13:33:05 -0230
Subject: [PATCH] Scenario 010 multinode fixups

While we aren't currently running this as voting or in the gate, we
should keep it in a working state in the event we want to add it to
some occasional queue to catch those problems that seem not to occur
on standalone.

Change-Id: I67a06d9288115d78bab8be46d42e23f82492d230
---
 .../scenario010-multinode-containers.yaml | 27 ++++++++++++-------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/ci/environments/scenario010-multinode-containers.yaml b/ci/environments/scenario010-multinode-containers.yaml
index 4aea5586d4..207cf8061f 100644
--- a/ci/environments/scenario010-multinode-containers.yaml
+++ b/ci/environments/scenario010-multinode-containers.yaml
@@ -5,6 +5,8 @@ resource_registry:
   OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
   OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
   OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
+  OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
+  OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
   OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml
   OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
   OS::TripleO::Services::PacemakerRemote: ../../puppet/services/pacemaker_remote.yaml
@@ -78,30 +80,35 @@ parameter_defaults:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
   Debug: true
+  LocalCephAnsibleFetchDirectoryBackup: /var/lib/ceph_ansible_fetch
   CephAnsibleDisksConfig:
-    devices:
-      - /dev/loop3
-    journal_size: 512
-    osd_scenario: collocated
+    osd_objectstore: bluestore
+    osd_scenario: lvm
+    lvm_volumes:
+      - data: ceph_lv_data
+        data_vg: ceph_vg
+        db: ceph_lv_db
+        db_vg: ceph_vg
+        wal: ceph_lv_wal
+        wal_vg: ceph_vg
   CephPoolDefaultPgNum: 32
   CephPoolDefaultSize: 1
-  CephPools:
-    - name: altrbd
-      pg_num: 8
-      rule_name: replicated_rule
   CephAnsibleExtraConfig:
     centos_package_dependencies: []
     ceph_osd_docker_memory_limit: '1g'
     ceph_mds_docker_memory_limit: '1g'
-  CephAnsibleSkipTags: ''
+    mon_host_v1: { 'enabled': False }
   #NOTE: These ID's and keys should be regenerated for
   #      a production deployment. What is here is suitable for
   #      developer and CI testing only.
   CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
-  CephClusterName: mycephcluster
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephAnsiblePlaybookVerbosity: 1
+  CephAnsibleEnvironmentVariables:
+    ANSIBLE_SSH_RETRIES: '4'
+    DEFAULT_FORKS: '3'
   NovaEnableRbdBackend: true
   GlanceBackend: rbd
   GnocchiRbdPoolName: metrics
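
Note on the first hunk: the two new resource_registry entries split the
messaging services into a pacemaker-managed RabbitMQ RPC bus and a
notification bus sharing the same broker, in line with the pacemaker
mappings (HAproxy, Pacemaker, PacemakerRemote) this file already
carries. For comparison, a non-HA job would point the same service keys
at the plain container variants; the paths below are an assumption
based on the deployment/rabbitmq layout in this tree, not part of the
patch, and should be verified against the branch in use:

  resource_registry:
    # Assumed non-pacemaker counterparts; verify these filenames exist
    # on your branch before relying on them.
    OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
    OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-container-puppet.yaml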
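
Note on the CephAnsibleDisksConfig hunk: the OSD layout moves from the
old collocated/journal scenario on a raw loop device to bluestore on
LVM. With osd_scenario: lvm, ceph-ansible consumes the named logical
volumes as-is, so the volume group ceph_vg and its three LVs must exist
on the node before the deploy runs; here that is assumed to be handled
by the CI job's node preparation, outside this file. The resulting
stanza, annotated:

  CephAnsibleDisksConfig:
    osd_objectstore: bluestore   # bluestore OSDs replace the collocated/journal layout
    osd_scenario: lvm            # consume pre-created LVs instead of raw devices
    lvm_volumes:
      - data: ceph_lv_data       # bluestore data volume
        data_vg: ceph_vg
        db: ceph_lv_db           # RocksDB metadata volume
        db_vg: ceph_vg
        wal: ceph_lv_wal         # write-ahead log volume
        wal_vg: ceph_vg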
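
Note on the debugging knobs: CephAnsiblePlaybookVerbosity bumps the
verbosity of the ceph-ansible playbook run, and
CephAnsibleEnvironmentVariables is passed into that run's environment;
the values are quoted strings, as the patch writes them. A local
reproducer chasing a flaky node could raise them, for example (values
below are illustrative, not from the patch):

  parameter_defaults:
    CephAnsiblePlaybookVerbosity: 3    # assumed to map to extra -v flags on ansible-playbook
    CephAnsibleEnvironmentVariables:
      ANSIBLE_SSH_RETRIES: '8'         # tolerate more SSH flakiness on CI nodes
      DEFAULT_FORKS: '3'               # keep forks low to bound memory on small nodes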