Ceph: Update failure domain overrides to support dynamic config

This PS updates the Ceph failure domain overrides to support
dynamic configuration via host- and label-based value overrides.

Also fixes a typo in the directory-backed OSD path, identified in
the following PS:
 * https://review.openstack.org/#/c/623670/1

Change-Id: Ia449be23353083f9a77df2b592944571c907e277
Signed-off-by: Pete Birley <pete@port.direct>
commit 7608d2c9d7 (parent d50bd2daad)
Pete Birley 2018-12-07 18:09:48 -06:00
5 changed files with 18 additions and 10 deletions

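Note: "host- and label-based overrides" refers to the chart's per-host and
per-label value overlays. A hypothetical sketch of pinning one node to a
named rack bucket; the override schema shown, the host name, and the bucket
name are illustrative assumptions, not part of this change:

  conf:
    overrides:
      ceph_osd:
        hosts:
          - name: osd-host-01.example.com
            conf:
              storage:
                failure_domain: "rack"
                failure_domain_name: "rack_01"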

@@ -25,6 +25,10 @@ set -ex
: "${OSD_SOFT_FORCE_ZAP:=1}"
: "${OSD_JOURNAL_PARTITION:=}"
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
export OSD_DEVICE="/var/lib/ceph/osd"
else

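Note: a minimal sketch of what the three eval lines above consume and
produce, assuming /etc/ceph/storage.json is rendered from the conf.storage
defaults shown at the end of this change (an illustrative subset; the real
file carries the whole conf.storage tree):

  $ cat /etc/ceph/storage.json
  {"failure_domain": "rack", "failure_domain_by_hostname": "false", "failure_domain_name": "rack_01"}

The python one-liner prints the JSON-encoded value, quotes included (e.g.
"rack"), and eval then strips those quotes, leaving the script with a plain
shell variable such as CRUSH_FAILURE_DOMAIN_TYPE=rack.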

@@ -7,6 +7,10 @@ export LC_ALL=C
: "${JOURNAL_DIR:=/var/lib/ceph/journal}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
+eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
+eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
function is_available {
command -v $@ &>/dev/null
}
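Note: the same three lookups are duplicated across both scripts; a small
helper would keep the parsing in one place. A sketch only, not part of this
change, and the function name is hypothetical:

  json_key () {
    python -c 'import sys, json; print(json.load(sys.stdin)[sys.argv[1]])' \
      "$1" < /etc/ceph/storage.json
  }
  CRUSH_FAILURE_DOMAIN_TYPE="$(json_key failure_domain)"

Printing the raw value also avoids the eval/json.dumps round trip above.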
@@ -95,7 +99,7 @@ if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
      osd crush move "${HOSTNAME}" "${crush_failure_domain_type}=${crush_failure_domain_name}" || true
  fi
}
-if [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "host" ]; then
+if [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "xhost" ]; then
if [ "x${CRUSH_FAILURE_DOMAIN_NAME}" != "xfalse" ]; then
crush_add_and_move "${CRUSH_FAILURE_DOMAIN_TYPE}" "${CRUSH_FAILURE_DOMAIN_NAME}"
elif [ "x${CRUSH_FAILURE_DOMAIN_BY_HOSTNAME}" != "xfalse" ]; then

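Note: the fixed comparison uses the classic "x" prefix idiom, which keeps the
test well-formed when the variable is empty; the removed line compared
"x${...}" against a bare "host", so the two sides could never be equal and
the branch ran even when the failure domain really was host. With
CRUSH_FAILURE_DOMAIN_TYPE=rack and CRUSH_FAILURE_DOMAIN_NAME=rack_01, the
branch ends up issuing ceph CLI calls along these lines (the full
crush_add_and_move body is not shown in this hunk, so the exact sequence is
an assumption):

  ceph --cluster ceph osd crush add-bucket rack_01 rack || true
  ceph --cluster ceph osd crush move rack_01 root=default || true
  ceph --cluster ceph osd crush move "${HOSTNAME}" rack=rack_01 || true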

@@ -48,6 +48,8 @@ metadata:
data:
  ceph.conf: |
{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+  storage.json: |
+{{ toPrettyJson .Values.conf.storage | indent 4 }}
{{- end }}
{{- end }}
{{- if .Values.manifests.configmap_etc }}

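Note: an illustrative rendering of the new configmap entry, assuming
conf.storage holds only the failure-domain keys from the defaults below:

  storage.json: |
    {
      "failure_domain": "host",
      "failure_domain_by_hostname": "false",
      "failure_domain_name": "false"
    }

toPrettyJson serialises the whole conf.storage tree, so any host- or
label-level override of these keys lands in the file that the OSD scripts
parse at startup.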

@@ -193,12 +193,6 @@ spec:
              value: "ceph"
            - name: CEPH_GET_ADMIN_KEY
              value: "1"
-            - name: CRUSH_FAILURE_DOMAIN_TYPE
-              value: {{ .Values.conf.storage.failure_domain | default "host" | quote }}
-            - name: CRUSH_FAILURE_DOMAIN_NAME
-              value: {{ .Values.conf.storage.failure_domain_name | default "false" | quote }}
-            - name: CRUSH_FAILURE_DOMAIN_BY_HOSTNAME
-              value: {{ .Values.conf.storage.failure_domain_by_hostname | default "false" | quote }}
            - name: NAMESPACE
              valueFrom:
                fieldRef:
@@ -252,6 +246,10 @@ spec:
              mountPath: /tmp/utils-checkDNS.sh
              subPath: utils-checkDNS.sh
              readOnly: true
+            - name: ceph-osd-etc
+              mountPath: /etc/ceph/storage.json
+              subPath: storage.json
+              readOnly: true
            - name: ceph-osd-etc
              mountPath: /etc/ceph/ceph.conf.template
              subPath: ceph.conf

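Note: a quick end-to-end check once the chart is deployed; the namespace and
pod name are illustrative:

  kubectl -n ceph exec <ceph-osd-pod> -- cat /etc/ceph/storage.json
  kubectl -n ceph exec <ceph-osd-pod> -- env | grep CRUSH || echo "not in env"

The second command should print "not in env": after this change the values
travel through the mounted storage.json rather than the container
environment, which is what lets per-host overrides vary without editing the
pod spec.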

@@ -128,9 +128,9 @@ conf:
    # `failure_domain_by_hostname`: Specify the portion of the hostname to use for your failure domain bucket name.
    # `failure_domain_name`: Manually name the failure domain bucket name. This configuration option should only be used
    # when using host based overrides.
    # failure_domain: "rack"
    # failure_domain_by_hostname: 1-8
    # failure_domain_name: false
    failure_domain: "host"
    failure_domain_by_hostname: "false"
    failure_domain_name: "false"
    # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
    # define OSD pods that will be deployed across the cluster.
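Note: on the commented failure_domain_by_hostname example "1-8" above: the
value reads as a cut -c style character range taken from the node hostname
(an assumption drawn from the example; the consuming script is outside this
diff). For instance:

  HOSTNAME=rack0001node3
  echo "${HOSTNAME}" | cut -c 1-8   # -> rack0001

so every host sharing those first eight characters lands in the same failure
domain bucket.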