Remove osd_pool_default_min_size to allow Ceph cluster to do the right thing by default

The default value is 0, which causes the minimum number to be calculated from the replica
count in osd_pool_default_size, i.e. min_size = replica - (replica/2). With the default
replica count of 3 the calculated min_size is 2; with a replica count of 1 the min_size is 1.

Add a CephPoolDefaultSize parameter to ceph-mon.yaml. It defaults to 3 but can be
overridden; see puppet-ceph-devel.yaml for an example.

Change-Id: Ie9bdd9b16bcb9f11107ece614b010e87d3ae98a9
(cherry picked from commit cdfe035663)
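To illustrate the calculation described above, a minimal sketch of an operator override follows; the file name and the inline arithmetic comments are illustrative only and are not part of this change:

# ceph-replication.yaml -- hypothetical environment file passed with `openstack overcloud deploy -e`
parameter_defaults:
  # Written through to ceph::profile::params::osd_pool_default_size.
  # With min_size left unset, Ceph derives min_size = size - (size / 2):
  #   size 3 -> min_size 2
  #   size 2 -> min_size 1
  #   size 1 -> min_size 1
  CephPoolDefaultSize: 3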
Keith Schincke 2017-05-12 08:12:56 -04:00 committed by Giulio Fidente
parent 7e4e8ab775
commit 8e8cafcf33
6 changed files with 21 additions and 2 deletions

@@ -102,6 +102,7 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   NovaEnableRbdBackend: true
   CinderEnableRbdBackend: true
   CinderBackupBackend: ceph

@@ -88,4 +88,5 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   SwiftCeilometerPipelineEnabled: false

@@ -20,3 +20,5 @@ parameter_defaults:
   GlanceBackend: rbd
   GnocchiBackend: rbd
   CinderEnableIscsiBackend: false
+  CephPoolDefaultSize: 1

@@ -91,7 +91,6 @@ outputs:
       service_name: ceph_base
       config_settings:
         tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
-        ceph::profile::params::osd_pool_default_min_size: 1
         ceph::profile::params::osds: {/srv/data: {}}
         ceph::profile::params::manage_repo: false
         ceph::profile::params::authentication_type: cephx
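Deployments that deliberately want min_size pinned (for example a throwaway POC) can still set the puppet-ceph parameter themselves; a minimal sketch, assuming the standard ExtraConfig hieradata override, which is not part of this change:

parameter_defaults:
  ExtraConfig:
    # Explicitly re-pin min_size; only reasonable for non-production clusters
    ceph::profile::params::osd_pool_default_min_size: 1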

@@ -70,6 +70,10 @@ parameters:
   MonitoringSubscriptionCephMon:
     default: 'overcloud-ceph-mon'
     type: string
+  CephPoolDefaultSize:
+    description: default minimum replication for RBD copies
+    type: number
+    default: 3
 
 resources:
   CephBase:
@@ -92,7 +96,7 @@ outputs:
       ceph::profile::params::mon_key: {get_param: CephMonKey}
       ceph::profile::params::osd_pool_default_pg_num: 128
       ceph::profile::params::osd_pool_default_pgp_num: 128
-      ceph::profile::params::osd_pool_default_size: 3
+      ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
       # repeat returns items in a list, so we need to map_merge twice
       tripleo::profile::base::ceph::mon::ceph_pools:
         map_merge:
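With the change above, the hieradata handed to puppet-ceph would look roughly like the sketch below when CephPoolDefaultSize keeps its default of 3 (an assumed rendering, not part of the diff):

ceph::profile::params::osd_pool_default_pg_num: 128
ceph::profile::params::osd_pool_default_pgp_num: 128
ceph::profile::params::osd_pool_default_size: 3
# osd_pool_default_min_size is no longer emitted, so Ceph calculates it as 2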

@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    Removed the hard coding of osd_pool_default_min_size. Setting this value
+    to 1 can result in data loss when operating production deployments. Not
+    setting this value (or setting it to 0) allows Ceph to calculate the
+    value based on the current setting of osd_pool_default_size. If the
+    replication count is 3, the calculated min_size is 2. If the
+    replication count is 1, the calculated min_size is 1. For POC
+    deployments using a single OSD, set osd_pool_default_size = 1. See the
+    description at http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
+    Added CephPoolDefaultSize to set the default replication size. The default value is 3.
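For the single-OSD POC case mentioned in the note, the recommendation maps onto the new parameter; a minimal sketch of the corresponding environment entry (assumed usage, not part of this change):

parameter_defaults:
  CephPoolDefaultSize: 1  # Ceph then calculates min_size as 1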