Ceph: Luminous fixes

init osd: the Ceph Luminous release initializes OSDs differently. This fix detects
the Ceph release and uses the right process to initialize the OSD directory.
mgr: set up the mgr deployment that Luminous introduces

Change-Id: I99a102f24c4a8ba18a0bba873e9f752368bea594
Signed-off-by: Huamin Chen <hchen@redhat.com>
Depends-On: I17359df62a720cbd0b3ff79b1d642f99b3e81b3f
Huamin Chen 2017-09-26 13:00:41 -04:00 committed by portdirect
parent df2f510a4d
commit eed43b8524
17 changed files with 461 additions and 84 deletions
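
At its core, the osd fix keys off a version probe before choosing an init path. A minimal sketch of that decision, assuming only that ceph is on PATH (the ceph/egrep probe is lifted verbatim from the osd_directory hunk below; the echo branches merely summarize the two paths):

#!/bin/bash
# egrep -q exits 0 on a match, so test_luminous=0 means the binaries are Luminous (12.2.x).
test_luminous=$(ceph -v | egrep -q "12.2|luminous"; echo $?)
if [[ ${test_luminous} -eq 0 ]]; then
  echo "Luminous: create the OSD via 'ceph osd new <uuid>' with a generated cephx secret"
else
  echo "pre-Luminous: create the OSD via 'ceph osd create'"
fi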


@@ -54,3 +54,9 @@ pull-all-images:
pull-images:
	@./tools/pull-images.sh $(filter-out $@,$(MAKECMDGOALS))

dev-deploy:
	@./tools/gate/devel/start.sh $(filter-out $@,$(MAKECMDGOALS))

%:
	@:


@@ -0,0 +1,25 @@
#!/bin/bash
set -ex
export LC_ALL=C

source variables_entrypoint.sh
source common_functions.sh

if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
  log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [[ ! -e $ADMIN_KEYRING ]]; then
  log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
  exit 1
fi

# Make sure rbd pool exists
if ! ceph ${CLI_OPTS} osd pool stats rbd > /dev/null 2>&1; then
  ceph ${CLI_OPTS} osd pool create rbd "${RBD_POOL_PG}"
  rbd pool init rbd
  ceph osd crush tunables hammer
fi

log "SUCCESS"


@@ -10,6 +10,12 @@ function is_integer {
}

function osd_directory {
  local test_luminous=$(ceph -v | egrep -q "12.2|luminous"; echo $?)
  if [[ ${test_luminous} -ne 0 ]]; then
    log "ERROR- need Luminous release"
    exit 1
  fi
  if [[ ! -d /var/lib/ceph/osd ]]; then
    log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
    log "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
@@ -23,8 +29,10 @@ function osd_directory {
  # check if anything is present, if not, create an osd and its directory
  if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
    log "Creating osd with ceph --cluster ${CLUSTER} osd create"
    OSD_ID=$(ceph --cluster ${CLUSTER} osd create)
    log "Creating osd"
    UUID=$(uuidgen)
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    OSD_ID=$(echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k "$OSD_BOOTSTRAP_KEYRING")
    if is_integer "$OSD_ID"; then
      log "OSD created with ID: ${OSD_ID}"
    else
@@ -32,22 +40,7 @@ function osd_directory {
      exit 1
    fi
    OSD_PATH=$(get_osd_path $OSD_ID)
    # create the folder and own it
    mkdir -p $OSD_PATH
    chown ceph. $OSD_PATH
    log "created folder $OSD_PATH"
  fi
  # create the directory and an empty Procfile
  mkdir -p /etc/forego/${CLUSTER}
  echo "" > /etc/forego/${CLUSTER}/Procfile
  for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
    OSD_PATH=$(get_osd_path $OSD_ID)
    OSD_KEYRING="$OSD_PATH/keyring"
    OSD_PATH=$(get_osd_path "$OSD_ID")
    if [ -n "${JOURNAL_DIR}" ]; then
      OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
      chown -R ceph. ${JOURNAL_DIR}
@@ -59,29 +52,42 @@ function osd_directory {
        OSD_J=${OSD_PATH}/journal
      fi
    fi
    # check to see if our osd has been initialized
    if [ ! -e ${OSD_PATH}/keyring ]; then
      chown ceph. $OSD_PATH
      # create osd key and file structure
      ceph-osd ${CLI_OPTS} -i $OSD_ID --mkfs --mkkey --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph
      if [ ! -e $OSD_BOOTSTRAP_KEYRING ]; then
        log "ERROR- $OSD_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o $OSD_BOOTSTRAP_KEYRING'"
        exit 1
      fi
      timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING health || exit 1
      # add the osd key
      ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING auth add osd.${OSD_ID} -i ${OSD_KEYRING} osd 'allow *' mon 'allow profile osd' || log $1
      log "done adding key"
      chown ceph. ${OSD_KEYRING}
      chmod 0600 ${OSD_KEYRING}
      # add the osd to the crush map
      OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
      ceph ${CLI_OPTS} --name=osd.${OSD_ID} --keyring=${OSD_KEYRING} osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
    # create the folder and own it
    mkdir -p "$OSD_PATH"
    chown "${CHOWN_OPT[@]}" ceph. "$OSD_PATH"
    log "created folder $OSD_PATH"
    # write the secret to the osd keyring file
    ceph-authtool --create-keyring ${OSD_PATH}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
    OSD_KEYRING="$OSD_PATH/keyring"
    # init data directory
    ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph
    # add the osd to the crush map
    OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
    ceph --name=osd.${OSD_ID} --keyring=${OSD_KEYRING} osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
  fi
  # create the directory and an empty Procfile
  mkdir -p /etc/forego/"${CLUSTER}"
  echo "" > /etc/forego/"${CLUSTER}"/Procfile
  for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
    OSD_PATH=$(get_osd_path "$OSD_ID")
    OSD_KEYRING="$OSD_PATH/keyring"
    if [ -n "${JOURNAL_DIR}" ]; then
      OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
      chown -R ceph. ${JOURNAL_DIR}
    else
      if [ -n "${JOURNAL}" ]; then
        OSD_J=${JOURNAL}
        chown -R ceph. $(dirname ${JOURNAL_DIR})
      else
        OSD_J=${OSD_PATH}/journal
      fi
    fi
    # log osd filesystem type
    FS_TYPE=`stat --file-system -c "%T" ${OSD_PATH}`
    log "OSD $OSD_PATH filesystem type: $FS_TYPE"
    echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --osd-journal ${OSD_J} -k $OSD_KEYRING" | tee -a /etc/forego/${CLUSTER}/Procfile
    echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS[*]} -f -i ${OSD_ID} --osd-journal ${OSD_J} -k $OSD_KEYRING" | tee -a /etc/forego/"${CLUSTER}"/Procfile
  done
  log "SUCCESS"
  start_forego
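
Condensed, the new Luminous directory-backed OSD bootstrap above reduces to the following sequence (all commands are taken from the hunk; paths and variables are as defined there):

UUID=$(uuidgen)
OSD_SECRET=$(ceph-authtool --gen-print-key)
# register the OSD with the cluster and receive its ID back
OSD_ID=$(echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k "$OSD_BOOTSTRAP_KEYRING")
# persist the secret in the OSD's own keyring, then build the data directory
ceph-authtool --create-keyring ${OSD_PATH}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph

This replaces the pre-Luminous flow, where 'ceph osd create' allocated the ID and 'ceph-osd --mkfs --mkkey' generated a key that was then registered with 'ceph auth add'.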


@@ -0,0 +1,44 @@
#!/bin/bash
set -ex

source variables_entrypoint.sh
source common_functions.sh

if [[ ! -e /usr/bin/ceph-mgr ]]; then
  log "ERROR- /usr/bin/ceph-mgr doesn't exist"
  sleep infinity
fi

if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
  log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
  if [[ ! -e $ADMIN_KEYRING ]]; then
    log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon"
    exit 1
  fi
fi

# Check to see if our MGR has been initialized
if [ ! -e "$MGR_KEYRING" ]; then
  # Create ceph-mgr key
  timeout 10 ceph ${CLI_OPTS} auth get-or-create mgr."$MGR_NAME" mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o "$MGR_KEYRING"
  chown --verbose ceph. "$MGR_KEYRING"
  chmod 600 "$MGR_KEYRING"
fi

log "SUCCESS"
ceph -v

if [[ "$MGR_DASHBOARD" == 1 ]]; then
  ceph ${CLI_OPTS} mgr module enable dashboard --force
  ceph ${CLI_OPTS} config-key put mgr/dashboard/server_addr "$MGR_IP"
  ceph ${CLI_OPTS} config-key put mgr/dashboard/server_port "$MGR_PORT"
fi

log "SUCCESS"

# start ceph-mgr
exec /usr/bin/ceph-mgr $DAEMON_OPTS -i "$MGR_NAME"
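
As a usage note (not part of the patch): once the mgr pod is running, the dashboard settings written above can be verified with stock ceph commands, e.g.:

ceph mgr module ls                              # "dashboard" should appear among the enabled modules
ceph config-key get mgr/dashboard/server_port   # returns the port stored above
curl -s http://${MGR_IP}:${MGR_PORT}/           # the dashboard answers on the configured address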


@@ -40,4 +40,4 @@ if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
  RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
fi
exec /usr/bin/radosgw $DAEMON_OPTS -n client.rgw.${RGW_NAME} -k $RGW_KEYRING --rgw-socket-path="" --rgw-zonegroup="$RGW_ZONEGROUP" --rgw-zone="$RGW_ZONE" --rgw-frontends="$RGW_FRONTENDS"
/usr/bin/radosgw $DAEMON_OPTS -n client.rgw.${RGW_NAME} -k $RGW_KEYRING --rgw-socket-path="" --rgw-zonegroup="$RGW_ZONEGROUP" --rgw-zone="$RGW_ZONE" --rgw-frontends="$RGW_FRONTENDS"


@@ -2,7 +2,7 @@
# LIST OF ALL DAEMON SCENARIOS AVAILABLE #
##########################################
ALL_SCENARIOS="osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal"
ALL_SCENARIOS="osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal mgr"
#########################
@@ -43,6 +43,11 @@ ALL_SCENARIOS="osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_dis
: ${RGW_REMOTE_CGI_PORT:=9000}
: ${RGW_REMOTE_CGI_HOST:=0.0.0.0}
: ${RGW_USER:="cephnfs"}
: ${MGR_NAME:=${HOSTNAME}}
: ${MGR_DASHBOARD:=1}
: ${MGR_IP:=0.0.0.0}
: ${MGR_PORT:=7000}
: ${RBD_POOL_PG:=128}
# This is ONLY used for the CLI calls, e.g: ceph $CLI_OPTS health
CLI_OPTS="--cluster ${CLUSTER}"
@@ -57,6 +62,7 @@ MDS_KEYRING=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring
ADMIN_KEYRING=/etc/ceph/${CLUSTER}.client.admin.keyring
MON_KEYRING=/etc/ceph/${CLUSTER}.mon.keyring
RGW_KEYRING=/var/lib/ceph/radosgw/${RGW_NAME}/keyring
MGR_KEYRING=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring
MDS_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring
RGW_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring
OSD_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring
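
For readers unfamiliar with the `: ${VAR:=default}` lines above: the no-op `:` builtin forces its arguments to be expanded, and the `:=` expansion assigns the default only when the variable is unset or empty, e.g.:

unset MGR_PORT
: ${MGR_PORT:=7000}   # MGR_PORT was unset, becomes 7000
MGR_PORT=7070
: ${MGR_PORT:=7000}   # already set, stays 7070
echo $MGR_PORT        # prints 7070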


@@ -61,6 +61,8 @@ data:
{{ tuple "bin/_start_mds.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  start_rgw.sh: |+
{{ tuple "bin/_start_rgw.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  start_mgr.sh: |+
{{ tuple "bin/_start_mgr.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  init_rgw_ks.sh: |+
{{ tuple "bin/_init_rgw_ks.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  watch_mon_health.sh: |+
@@ -77,5 +79,7 @@ data:
{{ tuple "bin/_ceph-mon-liveness.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  ceph-mon-readiness.sh: |
{{ tuple "bin/_ceph-mon-readiness.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  ceph_rbd_pool.sh: |
{{ tuple "bin/_ceph_rbd_pool.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
{{- end }}


@@ -91,6 +91,10 @@ spec:
              mountPath: /start_osd.sh
              subPath: start_osd.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /variables_entrypoint.sh
              subPath: variables_entrypoint.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /osd_directory.sh
              subPath: osd_directory.sh


@@ -0,0 +1,166 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.deployment_mgr }}
{{- $envAll := . }}
{{- if .Values.deployment.ceph }}
{{- if .Values.ceph.enabled.mgr }}
{{- $dependencies := .Values.dependencies.mgr }}
---
kind: Deployment
apiVersion: apps/v1beta1
metadata:
  name: ceph-mgr
spec:
  replicas: {{ .Values.pod.replicas.mgr }}
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
    spec:
      affinity:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
      nodeSelector:
        {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
      hostNetwork: true
      dnsPolicy: {{ .Values.pod.dns_policy }}
      serviceAccount: default
      initContainers:
{{ tuple $envAll $dependencies "" | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
        - name: ceph-init-dirs
          image: {{ .Values.images.tags.ceph_daemon }}
          imagePullPolicy: {{ .Values.images.pull_policy }}
          command:
            - /tmp/init_dirs.sh
          volumeMounts:
            - name: ceph-bin
              mountPath: /tmp/init_dirs.sh
              subPath: init_dirs.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /variables_entrypoint.sh
              subPath: variables_entrypoint.sh
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-run
              mountPath: /run
              readOnly: false
            - name: pod-etc-ceph
              mountPath: /etc/ceph
      containers:
        - name: ceph-mgr
          image: {{ .Values.images.tags.ceph_daemon }}
          imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
          env:
            - name: MGR_PORT
              value: "{{ .Values.network.port.mgr }}"
          command:
            - /start_mgr.sh
          ports:
            - containerPort: {{ .Values.network.port.mgr }}
          livenessProbe:
            httpGet:
              path: /
              port: {{ .Values.network.port.mgr }}
            initialDelaySeconds: 120
            timeoutSeconds: 5
          readinessProbe:
            httpGet:
              path: /
              port: {{ .Values.network.port.mgr }}
            timeoutSeconds: 5
          volumeMounts:
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-etc
              mountPath: /etc/ceph/ceph.conf
              subPath: ceph.conf
              readOnly: true
            - name: ceph-bin
              mountPath: /start_mgr.sh
              subPath: start_mgr.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /variables_entrypoint.sh
              subPath: variables_entrypoint.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /common_functions.sh
              subPath: common_functions.sh
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: ceph-mon-keyring
              mountPath: /etc/ceph/ceph.mon.keyring
              subPath: ceph.mon.keyring
              readOnly: true
            - name: ceph-bootstrap-osd-keyring
              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: ceph-bootstrap-mds-keyring
              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: ceph-bootstrap-mgr-keyring
              mountPath: /var/lib/ceph/bootstrap-mgr/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-run
              mountPath: /run
              readOnly: false
      volumes:
        - name: pod-etc-ceph
          emptyDir: {}
        - name: ceph-bin
          configMap:
            name: ceph-bin
            defaultMode: 0555
        - name: ceph-etc
          configMap:
            name: ceph-etc
            defaultMode: 0444
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: ceph-client-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
        - name: ceph-mon-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mon }}
        - name: ceph-bootstrap-osd-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.osd }}
        - name: ceph-bootstrap-mds-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mds }}
        - name: ceph-bootstrap-mgr-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mgr }}
{{- end }}
{{- end }}
{{- end }}


@@ -17,7 +17,7 @@ limitations under the License.
{{- if .Values.manifests.job_keyring }}
{{- $envAll := . }}
{{- if .Values.deployment.storage_secrets }}
{{- range $key1, $cephBootstrapKey := tuple "mds" "osd" "rgw" "mon" }}
{{- range $key1, $cephBootstrapKey := tuple "mds" "osd" "rgw" "mon" "mgr" }}
{{- if not (and (not $envAll.Values.manifests.deployment_rgw) (eq $cephBootstrapKey "rgw")) }}
{{- $jobName := print $cephBootstrapKey "-keyring-generator" }}
---


@@ -0,0 +1,92 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.deployment.ceph }}
{{- $envAll := . }}
{{- $dependencies := .Values.dependencies.rbd_pool }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: ceph-rbd-pool
spec:
  template:
    metadata:
      name: ceph-rbd-pool
      labels:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
    spec:
      restartPolicy: OnFailure
      affinity:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
      nodeSelector:
        {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
      serviceAccount: default
      containers:
        - name: ceph-rbd-pool
          image: {{ .Values.images.tags.ceph_daemon }}
          imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
          env:
            - name: RBD_POOL_PG
              value: "128"
          command:
            - /ceph_rbd_pool.sh
          volumeMounts:
            - name: ceph-bin
              mountPath: /ceph_rbd_pool.sh
              subPath: ceph_rbd_pool.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /variables_entrypoint.sh
              subPath: variables_entrypoint.sh
              readOnly: true
            - name: ceph-bin
              mountPath: /common_functions.sh
              subPath: common_functions.sh
              readOnly: true
            - name: ceph-etc
              mountPath: /etc/ceph/ceph.conf
              subPath: ceph.conf
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-run
              mountPath: /run
              readOnly: false
      volumes:
        - name: ceph-etc
          configMap:
            name: ceph-etc
            defaultMode: 0444
        - name: ceph-bin
          configMap:
            name: ceph-bin
            defaultMode: 0555
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: ceph-client-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
{{- end }}


@@ -0,0 +1,35 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.service_mgr }}
{{- $envAll := . }}
{{- if .Values.deployment.ceph }}
{{- if .Values.ceph.enabled.mgr }}
---
apiVersion: v1
kind: Service
metadata:
  name: ceph-mgr
spec:
  ports:
    - port: {{ .Values.network.port.mgr }}
      protocol: TCP
      targetPort: {{ .Values.network.port.mgr }}
  selector:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
{{- end }}
{{- end }}
{{- end }}


@@ -24,9 +24,9 @@ images:
    ks_user: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    ks_service: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    ks_endpoints: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    ceph_bootstrap: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
    ceph_bootstrap: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
    ceph_daemon: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
    ceph_daemon: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
    ceph_config_helper: docker.io/port/ceph-config-helper:v1.7.5
    ceph_rbd_provisioner: quay.io/external_storage/rbd-provisioner:v0.1.1
  pull_policy: "IfNotPresent"
@@ -47,6 +47,9 @@ labels:
  rgw:
    node_selector_key: ceph-rgw
    node_selector_value: enabled
  mgr:
    node_selector_key: ceph-mgr
    node_selector_value: enabled

pod:
  dns_policy: "ClusterFirstWithHostNet"
@@ -54,6 +57,7 @@ pod:
    rgw: 1
    mon_check: 1
    rbd_provisioner: 2
    mgr: 1
  affinity:
    anti:
      type:
@@ -104,6 +108,13 @@ pod:
      limits:
        memory: "50Mi"
        cpu: "500m"
    mgr:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
  jobs:
    bootstrap:
      limits:
@@ -147,6 +158,7 @@ secrets:
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: ceph-client-admin-keyring
  identity:
    admin: ceph-keystone-admin
@@ -159,6 +171,7 @@ network:
  port:
    mon: 6789
    rgw: 8088
    mgr: 7000

conf:
  rgw_ks:
@@ -177,56 +190,16 @@ conf:
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
      max_open_files: 131072
      osd_pool_default_pg_num: 128
      osd_pool_default_pgp_num: 128
      osd_pool_default_size: 3
      osd_pool_default_min_size: 1
      mon_osd_full_ratio: .95
      mon_osd_nearfull_ratio: .85
      mon_host: null
    mon:
      mon_osd_down_out_interval: 600
      mon_osd_min_down_reporters: 4
      mon_clock_drift_allowed: .15
      mon_clock_drift_warn_backoff: 30
      mon_osd_report_timeout: 300
    osd:
      journal_size: 100
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_mon_heartbeat_interval: 30
      osd_max_object_name_len: 256
      #crush
      osd_pool_default_crush_rule: 0
      osd_crush_update_on_start: true
      osd_crush_chooseleaf_type: 1
      #backend
      osd_objectstore: filestore
      #performance tuning
      filestore_merge_threshold: 40
      filestore_split_multiple: 8
      osd_op_threads: 8
      filestore_op_threads: 8
      filestore_max_sync_interval: 5
      osd_max_scrubs: 1
      #recovery tuning
      osd_recovery_max_active: 5
      osd_max_backfills: 2
      osd_recovery_op_priority: 2
      osd_client_op_priority: 63
      osd_recovery_max_chunk: 1048576
      osd_recovery_threads: 1
      #ports
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100
    client:
      rbd_cache_enabled: true
      rbd_cache_writethrough_until_flush: true
      rbd_default_features: "1"
    mds:
      mds_cache_size: 100000

dependencies:
  mon:
@@ -258,6 +231,7 @@ dependencies:
      - service: ceph_mon
        endpoint: internal
  rbd_provisioner:
    jobs:
    services:
      - service: ceph_mon
        endpoint: internal
@@ -275,12 +249,22 @@ dependencies:
    services:
      - service: identity
        endpoint: internal
  mgr:
    jobs:
    services:
      - service: ceph_mon
        endpoint: internal
  rbd_pool:
    services:
      - service: ceph_mon
        endpoint: internal

ceph:
  rgw_keystone_auth: false
  enabled:
    mds: true
    rgw: true
    mgr: true
  storage:
    osd_directory: /var/lib/openstack-helm/ceph/osd
    var_directory: /var/lib/openstack-helm/ceph/ceph
@@ -384,6 +368,7 @@ manifests:
  deployment_moncheck: true
  deployment_rbd_provisioner: true
  deployment_rgw: true
  deployment_mgr: true
  job_bootstrap: true
  job_keyring: true
  job_ks_endpoints: true
@@ -394,6 +379,7 @@ manifests:
  job_storage_admin_keys: true
  secret_keystone_rgw: true
  secret_keystone: true
  service_mgr: true
  service_mon: true
  service_rgw: true
  storageclass: true


@@ -235,6 +235,7 @@ Nodes are labeled according to their Openstack roles:
* **Ceph OSD Nodes:** ``ceph-osd``
* **Ceph MDS Nodes:** ``ceph-mds``
* **Ceph RGW Nodes:** ``ceph-rgw``
* **Ceph MGR Nodes:** ``ceph-mgr``
* **Control Plane:** ``openstack-control-plane``
* **Compute Nodes:** ``openvswitch``, ``openstack-compute-node``
@@ -245,6 +246,7 @@ Nodes are labeled according to their Openstack roles:
  kubectl label nodes ceph-osd=enabled --all
  kubectl label nodes ceph-mds=enabled --all
  kubectl label nodes ceph-rgw=enabled --all
  kubectl label nodes ceph-mgr=enabled --all
  kubectl label nodes openvswitch=enabled --all
  kubectl label nodes openstack-compute-node=enabled --all


@@ -29,7 +29,7 @@ release_group: null
images:
  tags:
    test: docker.io/kolla/ubuntu-source-rally:4.0.0
    glance_storage_init: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
    glance_storage_init: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
    db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    glance_db_sync: docker.io/kolla/ubuntu-source-glance-api:3.0.3
    db_drop: docker.io/kolla/ubuntu-source-heat-engine:3.0.3


@@ -9,7 +9,7 @@ labels:
images:
  tags:
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
    storage_init: quay.io/attcomdev/ceph-daemon:tag-build-master-jewel-ubuntu-16.04
    storage_init: docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04
    db_init_indexer: docker.io/postgres:9.5
    # using non-kolla images until kolla supports postgres as
    # an indexer


@@ -23,6 +23,7 @@ if [ "x$PVC_BACKEND" == "xceph" ]; then
  kubectl label nodes ceph-osd=enabled --all --overwrite
  kubectl label nodes ceph-mds=enabled --all --overwrite
  kubectl label nodes ceph-rgw=enabled --all --overwrite
  kubectl label nodes ceph-mgr=enabled --all --overwrite
fi

if [ "x$SDN_PLUGIN" == "xovs" ]; then