From a89cd6b19b7bd4178e052aa4cf0ef7f59145b708 Mon Sep 17 00:00:00 2001
From: John Fulton
Date: Fri, 23 Feb 2018 17:10:08 -0500
Subject: [PATCH] Switch scenario00{1,4}-multinode-containers to Ceph bluestore

Modify scenario00{1,4}-multinode to use Ceph's bluestore in place of
filestore. Bluestore is the default deployment method as of version 3.2
of ceph-ansible, so we should test it in CI.

Use the lvm_volumes parameter with pre-created logical volumes to avoid
an issue with 'ceph-volume batch' mode, which does not work on loopback
devices.

blueprint: bluestore
Depends-On: I747ac3dca5afdc91538da40b9ed45591ac8d1662
Fixes-Bug: #1817688
(cherry picked from commit e3f697df6e1ff57b6f9e2cf3d455984dbf1faf84)
Change-Id: Id2658ae814b580971d559af616b8ba034dff681b
---
 .../scenario001-multinode-containers.yaml | 13 +++++++++----
 .../scenario004-multinode-containers.yaml | 14 +++++++++-----
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml
index c6514707ef..2777f3f754 100644
--- a/ci/environments/scenario001-multinode-containers.yaml
+++ b/ci/environments/scenario001-multinode-containers.yaml
@@ -112,10 +112,15 @@ parameter_defaults:
   Debug: true
   DockerPuppetDebug: True
   CephAnsibleDisksConfig:
-    devices:
-      - /dev/loop3
-    journal_size: 512
-    osd_scenario: collocated
+    osd_objectstore: bluestore
+    osd_scenario: lvm
+    lvm_volumes:
+      - data: ceph_lv_data
+        data_vg: ceph_vg
+        db: ceph_lv_db
+        db_vg: ceph_vg
+        wal: ceph_lv_wal
+        wal_vg: ceph_vg
   CephPoolDefaultPgNum: 32
   CephPoolDefaultSize: 1
   CephPools:
diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml
index 5c810a1a6a..e9f0f06d97 100644
--- a/ci/environments/scenario004-multinode-containers.yaml
+++ b/ci/environments/scenario004-multinode-containers.yaml
@@ -93,11 +93,15 @@ parameter_defaults:
   Debug: true
   DockerPuppetDebug: True
   CephAnsibleDisksConfig:
-    devices:
-      - /dev/loop3
-    journal_size: 512
-    journal_collocation: true
-    osd_scenario: collocated
+    osd_objectstore: bluestore
+    osd_scenario: lvm
+    lvm_volumes:
+      - data: ceph_lv_data
+        data_vg: ceph_vg
+        db: ceph_lv_db
+        db_vg: ceph_vg
+        wal: ceph_lv_wal
+        wal_vg: ceph_vg
   # Without MDS and RGW we create 5 pools, totalling 160 PGs at 32 PGs each
   # With MDS and RGW instead we create 9 pools, so we lower the PG size
   CephPoolDefaultPgNum: 16
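
Note (reviewer sketch, not part of the patch): the lvm_volumes entries above assume
that the ceph_vg volume group and the ceph_lv_data, ceph_lv_db and ceph_lv_wal
logical volumes already exist on the node before ceph-ansible runs. A minimal
Ansible-style sketch of how a CI job could pre-create them on a loopback device is
below; the backing-file path, loop device and sizes are illustrative assumptions
and are not taken from this change.

    # Hypothetical CI preparation tasks; paths and sizes are assumptions.
    - name: Create a backing file and attach it to a loop device
      shell: |
        dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1M count=10240
        losetup /dev/loop3 /var/lib/ceph-osd.img
      args:
        creates: /var/lib/ceph-osd.img

    - name: Create the ceph_vg volume group on the loop device
      lvg:
        vg: ceph_vg
        pvs: /dev/loop3

    - name: Create the logical volumes referenced by lvm_volumes
      lvol:
        vg: ceph_vg
        lv: "{{ item.name }}"
        size: "{{ item.size }}"
      loop:
        - { name: ceph_lv_data, size: 7g }
        - { name: ceph_lv_db, size: 1g }
        - { name: ceph_lv_wal, size: 1g }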