From d348ebc34ee223b301f639be37255056f8f34c9c Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Mon, 16 Jul 2018 13:41:10 +0200 Subject: [PATCH] Lower Ceph PGs count in scenario004 Each OSD can only host a maximum of 200 PGs; in scenario004 we create 9 pools to enable MDS/Manila and RGW so we need to lower the PGs count further, compared to scenario001. Also lowers the values in low-memory-usage.yaml environment file. Change-Id: If95a0e3fe5aeef61f9712d8006e0f49c11a0c90f Closes-Bug: 1781910 --- ci/environments/scenario004-multinode-containers.yaml | 6 +++++- environments/low-memory-usage.yaml | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml index 6a7206611a..028654935a 100644 --- a/ci/environments/scenario004-multinode-containers.yaml +++ b/ci/environments/scenario004-multinode-containers.yaml @@ -101,7 +101,11 @@ parameter_defaults: journal_size: 512 journal_collocation: true osd_scenario: collocated - CephPoolDefaultPgNum: 32 + # Without MDS and RGW we create 5 pools, totalling 160 PGs at 32 PGs each + # With MDS and RGW instead we create 9 pools, so we lower the PG size + CephPoolDefaultPgNum: 16 + ManilaCephFSDataPoolPGNum: 16 + ManilaCephFSMetadataPoolPGNum: 16 CephPoolDefaultSize: 1 CephAnsibleExtraConfig: centos_package_dependencies: [] diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml index 07b27b1579..a4d3d9296a 100644 --- a/environments/low-memory-usage.yaml +++ b/environments/low-memory-usage.yaml @@ -20,6 +20,8 @@ parameter_defaults: # Override defaults to get HEALTH_OK with 1 OSD (for testing only) CephPoolDefaultSize: 1 - CephPoolDefaultPgNum: 32 + CephPoolDefaultPgNum: 16 + ManilaCephFSDataPoolPGNum: 16 + ManilaCephFSMetadataPoolPGNum: 16 NovaReservedHostMemory: 512