Allow Ceph pools to use 1x replication

Beginning with the Pacific release, Ceph pools are not allowed to
use 1x replication by default. This is problematic for the
openstack-helm gate scripts, which frequently use 1x replication
for automated testing. This change adds Ceph configuration and
command overrides to allow those gate scripts to continue to use
1x replication for testing.
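
For reference, a minimal sketch of the two pieces working together (run
against a Pacific cluster; the pool name "test-pool" is illustrative,
not one the gate scripts create):

    # Monitors must be configured to allow size-1 pools at all; the gate
    # scripts set this via ceph.conf overrides, but it can also be set
    # at runtime:
    ceph config set mon mon_allow_pool_size_one true

    # Even with that option enabled, shrinking a pool to one replica
    # still requires explicit confirmation on the command line:
    ceph osd pool set test-pool size 1 --yes-i-really-mean-it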

Change-Id: I21ed3e43f3773d5ea830959f1b66b35f38185ca7
Author: Stephen Taylor 2023-03-15 07:10:10 -06:00
parent 7d023865a6
commit 46beb87574
10 changed files with 10 additions and 5 deletions

@@ -14,7 +14,7 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Cinder
 name: cinder
-version: 0.3.6
+version: 0.3.7
 home: https://docs.openstack.org/cinder/latest/
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Cinder/OpenStack_Project_Cinder_vertical.png
 sources:

@@ -37,7 +37,7 @@ elif [[ $STORAGE_BACKEND =~ 'cinder.backup.drivers.ceph' ]]; then
     fi
     size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')
     ceph osd pool set $1 nosizechange 0
-    ceph osd pool set $1 size ${RBD_POOL_REPLICATION}
+    ceph osd pool set $1 size ${RBD_POOL_REPLICATION} --yes-i-really-mean-it
     ceph osd pool set $1 nosizechange ${size_protection}
     ceph osd pool set $1 crush_rule "${RBD_POOL_CRUSH_RULE}"
   }

@@ -34,7 +34,7 @@ if [ "x$STORAGE_BACKEND" == "xcinder.volume.drivers.rbd.RBDDriver" ]; then
     fi
     size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')
     ceph osd pool set $1 nosizechange 0
-    ceph osd pool set $1 size ${RBD_POOL_REPLICATION}
+    ceph osd pool set $1 size ${RBD_POOL_REPLICATION} --yes-i-really-mean-it
     ceph osd pool set $1 nosizechange ${size_protection}
     ceph osd pool set $1 crush_rule "${RBD_POOL_CRUSH_RULE}"
   }

@@ -14,7 +14,7 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Glance
 name: glance
-version: 0.4.1
+version: 0.4.2
 home: https://docs.openstack.org/glance/latest/
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/Glance/OpenStack_Project_Glance_vertical.png
 sources:

@@ -49,7 +49,7 @@ elif [ "x$STORAGE_BACKEND" == "xrbd" ]; then
     if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
       ceph osd pool application enable $1 $3
     fi
-    ceph osd pool set "$1" size "${RBD_POOL_REPLICATION}"
+    ceph osd pool set "$1" size "${RBD_POOL_REPLICATION}" --yes-i-really-mean-it
     ceph osd pool set "$1" crush_rule "${RBD_POOL_CRUSH_RULE}"
   }
   ensure_pool "${RBD_POOL_NAME}" "${RBD_POOL_CHUNK_SIZE}" "${RBD_POOL_APP_NAME}"

@@ -56,4 +56,5 @@ cinder:
   - 0.3.4 Fix Helm hooks for storage bootstrap jobs
   - 0.3.5 Add Nova endpoint details to support online volume resize
   - 0.3.6 Fix ceph keyring placement for uppercased backends
+  - 0.3.7 Allow Ceph pools to use 1x replication
 ...

@@ -35,4 +35,5 @@ glance:
   - 0.3.12 Add support for using Cinder as backend
   - 0.4.0 Remove support for Train and Ussuri
   - 0.4.1 Remove default policy rules
+  - 0.4.2 Allow Ceph pools to use 1x replication
 ...

@@ -41,6 +41,7 @@ conf:
   ceph:
     global:
       mon_host: ceph-mon-discovery.ceph.svc.cluster.local:6789
+      mon_allow_pool_size_one: true
 EOF
 helm upgrade --install ceph-openstack-config ${HELM_CHART_ROOT_PATH}/ceph-provisioners \
   --namespace=openstack \

@@ -67,6 +67,7 @@ conf:
     global:
       fsid: ${CEPH_FS_ID}
       mon_addr: :6789
+      mon_allow_pool_size_one: true
       osd_pool_default_size: 1
     osd:
       osd_crush_chooseleaf_type: 0

@@ -48,6 +48,7 @@ conf:
   ceph:
     global:
       fsid: ${CEPH_FS_ID}
+      mon_allow_pool_size_one: true
   pool:
     crush:
       tunables: ${CRUSH_TUNABLES}