[ceph-rgw] Add rgw restart job

Some updates to the rgw config, such as zone or zonegroup changes made
during the bootstrap process, require an rgw restart.
Add a restart job which, when enabled, uses
'kubectl rollout restart deployment'
to restart rgw.

This is most useful in greenfield scenarios where zones and zonegroups
need to be set up right after the rgw service comes up, which requires
restarting the rgw service.

Change-Id: I6667237e92a8b87a06d2a59c65210c482f3b7302
Alexander Vlasov 2020-06-09 20:14:11 -05:00 committed by chinasubbareddy mallavarapu
parent 9655817eae
commit 70b0b9b266
4 changed files with 137 additions and 0 deletions
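
For reference, a minimal sketch of how the new job could be enabled through
a values override; the release name, chart path, and namespace below are
placeholders, while manifests.job_rgw_restart and conf.rgw_restart.timeout
are the keys added by this change:

  # Enable the disabled-by-default restart job and raise its rollout timeout.
  # Release name, chart path, and namespace are placeholders for illustration.
  helm upgrade --install ceph-rgw ./ceph-rgw \
    --namespace ceph \
    --set manifests.job_rgw_restart=true \
    --set conf.rgw_restart.timeout=900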


@@ -0,0 +1,25 @@
#!/bin/bash
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
export LC_ALL=C
TIMEOUT="{{ .Values.conf.rgw_restart.timeout | default 600 }}s"
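# Trigger a rolling restart of the rgw deployment, then wait until the new
# pods are ready or the rollout exceeds ${TIMEOUT}.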
kubectl rollout restart deployment ceph-rgw
kubectl rollout status --timeout=${TIMEOUT} deployment ceph-rgw
if [ "$?" -ne 0 ]; then
echo "Ceph rgw deployment was not able to restart in ${TIMEOUT}"
fi


@@ -30,6 +30,8 @@ data:
{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
  rgw-restart.sh: |
{{ tuple "bin/_rgw-restart.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  init-dirs.sh: |
{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}


@@ -0,0 +1,89 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.job_rgw_restart }}
{{- $envAll := . }}
{{- $serviceAccountName := printf "%s-%s" .Release.Name "rgw-restart" }}
{{ tuple $envAll "rgw_restart" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - 'apps'
    resources:
      - deployments
    verbs:
      - get
      - list
      - update
      - patch
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ $serviceAccountName }}
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
  name: ceph-rgw-restart
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "rgw-restart" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
{{ dict "envAll" $envAll "podName" "ceph-rgw-restart" "containerNames" (list "init" "ceph-rgw-restart") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "rgw_restart" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      restartPolicy: OnFailure
      nodeSelector:
        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
      initContainers:
{{ tuple $envAll "rgw_restart" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: ceph-rgw-restart
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_restart | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "rgw_restart" "container" "ceph-rgw-restart" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/rgw-restart.sh
          volumeMounts:
            - name: ceph-rgw-bin
              mountPath: /tmp/rgw-restart.sh
              subPath: rgw-restart.sh
              readOnly: true
      volumes:
        - name: ceph-rgw-bin
          configMap:
            name: ceph-rgw-bin
            defaultMode: 0555
{{- end }}


@@ -75,6 +75,13 @@ pod:
        rgw_storage_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    rgw_restart:
      pod:
        runAsUser: 65534
      container:
        ceph-rgw-restart:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    rgw_s3_admin:
      pod:
        runAsUser: 64045
@@ -173,6 +180,13 @@ pod:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rgw_restart:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      tests:
        requests:
          memory: "128Mi"
@@ -365,6 +379,8 @@ conf:
    rgw_dynamic_resharding: false
    rgw_num_rados_handles: 4
    rgw_override_bucket_index_max_shards: 8
  rgw_restart:
    timeout: 600
  rgw_ks:
    enabled: false
    config:
@@ -425,6 +441,10 @@ dependencies:
    rgw:
      jobs:
        - ceph-rgw-storage-init
    rgw_restart:
      services:
        - endpoint: internal
          service: ceph_object_store
    image_repo_sync:
      services:
        - endpoint: internal
@@ -601,6 +621,7 @@ manifests:
  deployment_rgw: true
  ingress_rgw: true
  job_bootstrap: false
  job_rgw_restart: false
  job_ceph_rgw_storage_init: true
  job_image_repo_sync: true
  job_ks_endpoints: true