From 8e8cafcf33347d0a4301a01c9e0db6a8428bbc90 Mon Sep 17 00:00:00 2001
From: Keith Schincke
Date: Fri, 12 May 2017 08:12:56 -0400
Subject: [PATCH] Remove osd_pool_default_min_size to allow Ceph cluster to do
 the right thing by default

The default value is 0, which causes the minimum to be calculated from the
replica count in osd_pool_default_size. The default replica count is 3, so
the calculated min_size is 2. If the replica count is 1, then the min_size
is 1, i.e.: min_size = replica - (replica/2)

Add the CephPoolDefaultSize parameter to ceph-mon.yaml. This parameter
defaults to 3 but can be overridden. See puppet-ceph-devel.yaml for an
example.

Change-Id: Ie9bdd9b16bcb9f11107ece614b010e87d3ae98a9
(cherry picked from commit cdfe03566354b938e94c6863b0d4b1c3e64cc10c)
---
 ci/environments/scenario001-multinode.yaml           |  1 +
 ci/environments/scenario004-multinode.yaml           |  1 +
 environments/puppet-ceph-devel.yaml                  |  2 ++
 puppet/services/ceph-base.yaml                       |  1 -
 puppet/services/ceph-mon.yaml                        |  6 +++++-
 ...unset-ceph-default-min-size-0297620ed99dab5b.yaml | 12 ++++++++++++
 6 files changed, 21 insertions(+), 2 deletions(-)
 create mode 100644 releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml

diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index 2cb4daec66..9d694ea7be 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -102,6 +102,7 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   NovaEnableRbdBackend: true
   CinderEnableRbdBackend: true
   CinderBackupBackend: ceph
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 68db9961fb..2475996f01 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -88,4 +88,5 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   SwiftCeilometerPipelineEnabled: false
diff --git a/environments/puppet-ceph-devel.yaml b/environments/puppet-ceph-devel.yaml
index 9c8abbb48e..8fc4bf2995 100644
--- a/environments/puppet-ceph-devel.yaml
+++ b/environments/puppet-ceph-devel.yaml
@@ -20,3 +20,5 @@ parameter_defaults:
   GlanceBackend: rbd
   GnocchiBackend: rbd
   CinderEnableIscsiBackend: false
+  CephPoolDefaultSize: 1
+
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index 033d3f778f..1eea3dc78b 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -91,7 +91,6 @@ outputs:
       service_name: ceph_base
      config_settings:
        tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
-        ceph::profile::params::osd_pool_default_min_size: 1
        ceph::profile::params::osds: {/srv/data: {}}
        ceph::profile::params::manage_repo: false
        ceph::profile::params::authentication_type: cephx
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index 209d838899..0cdc1bc70d 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -70,6 +70,10 @@ parameters:
   MonitoringSubscriptionCephMon:
     default: 'overcloud-ceph-mon'
     type: string
+  CephPoolDefaultSize:
+    description: Default replica count (osd_pool_default_size) for Ceph pools
+    type: number
+    default: 3

 resources:
   CephBase:
@@ -92,7 +96,7 @@ outputs:
         ceph::profile::params::mon_key: {get_param: CephMonKey}
         ceph::profile::params::osd_pool_default_pg_num: 128
         ceph::profile::params::osd_pool_default_pgp_num: 128
-        ceph::profile::params::osd_pool_default_size: 3
+        ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
         # repeat returns items in a list, so we need to map_merge twice
         tripleo::profile::base::ceph::mon::ceph_pools:
           map_merge:
diff --git a/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
new file mode 100644
index 0000000000..fc2cb48ad1
--- /dev/null
+++ b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    Removed the hard coding of osd_pool_default_min_size. Setting this value
+    to 1 can result in data loss when operating production deployments. Not
+    setting this value (or setting it to 0) allows Ceph to calculate the
+    value based on the current setting of osd_pool_default_size. If the
+    replication count is 3, then the calculated min_size is 2. If the
+    replication count is 1, then the calculated min_size is 1. For POC
+    deployments using a single OSD, set osd_pool_default_size = 1. See the
+    description at http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
+    Added CephPoolDefaultSize to set the default replication size. The default value is 3.
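
A minimal usage sketch for the single-OSD POC case described in the release
note, assuming an operator passes an extra environment file to the deploy
command; the file name below is illustrative and only the CephPoolDefaultSize
parameter itself is introduced by this patch:

  # poc-ceph-single-osd.yaml (hypothetical file name)
  # Deploy with an extra -e argument, e.g.:
  #   openstack overcloud deploy --templates -e poc-ceph-single-osd.yaml
  parameter_defaults:
    # One replica is only acceptable for proof-of-concept deployments with a
    # single OSD; Ceph then derives min_size = 1 (size - size/2).
    CephPoolDefaultSize: 1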