Merge "Update control-plane nodes taint"
This commit is contained in:
commit
0737a157dd
|
@ -4400,7 +4400,7 @@ spec:
|
|||
# Mark the pod as a critical add-on for rescheduling.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
- key: node-role.kubernetes.io/master
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
serviceAccountName: calico-kube-controllers
|
||||
priorityClassName: system-cluster-critical
|
||||
|
|
|
@ -4965,8 +4965,6 @@ spec:
|
|||
# Mark the pod as a critical add-on for rescheduling.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
serviceAccountName: calico-kube-controllers
|
||||
|
|
|
@ -454,7 +454,7 @@ if [ -f /etc/sysconfig/docker ] ; then
|
|||
sed -i -E 's/^OPTIONS=("|'"'"')/OPTIONS=\1'"${DOCKER_OPTIONS}"' /' /etc/sysconfig/docker
|
||||
fi
|
||||
|
||||
KUBELET_ARGS="${KUBELET_ARGS} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule"
|
||||
KUBELET_ARGS="${KUBELET_ARGS} --register-with-taints=node-role.kubernetes.io/control-plane=:NoSchedule"
|
||||
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}"
|
||||
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}"
|
||||
|
||||
|
|
|
@ -208,7 +208,7 @@ spec:
|
|||
namespace: kube-system
|
||||
spec:
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
|
@ -220,7 +220,7 @@ spec:
|
|||
- key: node.cloudprovider.kubernetes.io/uninitialized
|
||||
value: "true"
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/master
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
# You'll want to change these labels and conditions to suit your deployment.
|
||||
|
@ -336,7 +336,7 @@ spec:
|
|||
- effect: NoExecute
|
||||
operator: Exists
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
containers:
|
||||
- name: magnum-auto-healer
|
||||
image: ${image_prefix}/magnum-auto-healer:${MAGNUM_AUTO_HEALER_TAG}
|
||||
|
|
|
@ -118,7 +118,7 @@ spec:
|
|||
app: cluster-autoscaler
|
||||
spec:
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
tolerations:
|
||||
|
@ -131,7 +131,7 @@ spec:
|
|||
- key: node.cloudprovider.kubernetes.io/uninitialized
|
||||
value: "true"
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/master
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
serviceAccountName: cluster-autoscaler-account
|
||||
containers:
|
||||
|
|
|
@ -230,7 +230,7 @@ spec:
|
|||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
containers:
|
||||
- name: csi-attacher
|
||||
image: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/sig-storage/}csi-attacher:${CSI_ATTACHER_TAG}
|
||||
|
|
|
@ -81,7 +81,7 @@ spec:
|
|||
- effect: NoExecute
|
||||
operator: Exists
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: "" # octavia-ingress-controller needs to access /etc/kubernetes folder.
|
||||
node-role.kubernetes.io/control-plane: "" # octavia-ingress-controller needs to access /etc/kubernetes folder.
|
||||
containers:
|
||||
- name: octavia-ingress-controller
|
||||
image: ${oic_image}
|
||||
|
|
|
@ -117,7 +117,7 @@ spec:
|
|||
- effect: NoExecute
|
||||
operator: Exists
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
containers:
|
||||
- name: k8s-keystone-auth
|
||||
image: ${_prefix}k8s-keystone-auth:${K8S_KEYSTONE_AUTH_TAG}
|
||||
|
|
|
@ -29,9 +29,9 @@ done
|
|||
# Label self as master
|
||||
until [ "ok" = "$(kubectl get --raw='/healthz')" ] && \
|
||||
kubectl patch node ${INSTANCE_NAME} \
|
||||
--patch '{"metadata": {"labels": {"node-role.kubernetes.io/master": ""}}}'
|
||||
--patch '{"metadata": {"labels": {"node-role.kubernetes.io/control-plane": ""}}}'
|
||||
do
|
||||
echo "Trying to label master node with node-role.kubernetes.io/master=\"\""
|
||||
echo "Trying to label master node with node-role.kubernetes.io/control-plane=\"\""
|
||||
sleep 5s
|
||||
done
|
||||
|
||||
|
|
|
@ -336,7 +336,7 @@ spec:
|
|||
# this is to restrict CCM to only run on master nodes
|
||||
# the node selector may vary depending on your cluster setup
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
EOF
|
||||
}
|
||||
|
||||
|
|
|
@ -249,7 +249,7 @@ spec:
|
|||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
@ -319,7 +319,7 @@ spec:
|
|||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
|
|
|
@ -16,8 +16,8 @@ new_ostree_commit="$ostree_commit_input"
|
|||
function drain {
|
||||
# If there is only one master and this is the master node, skip the drain, just cordon it
|
||||
# If there is only one worker and this is the worker node, skip the drain, just cordon it
|
||||
all_masters=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/master= -o name)
|
||||
all_workers=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/master!= -o name)
|
||||
all_masters=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/control-plane= -o name)
|
||||
all_workers=$(${ssh_cmd} ${kubecontrol} get nodes --selector=node-role.kubernetes.io/control-plane!= -o name)
|
||||
if [ "node/${INSTANCE_NAME}" != "${all_masters}" ] && [ "node/${INSTANCE_NAME}" != "${all_workers}" ]; then
|
||||
${ssh_cmd} ${kubecontrol} drain ${INSTANCE_NAME} --ignore-daemonsets --delete-local-data --force
|
||||
else
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
features:
|
||||
- |
|
||||
Adds initial support for Kubernetes v1.28
|
||||
upgrade:
|
||||
- |
|
||||
The taint for control plane nodes has been updated from
|
||||
'node-role.kubernetes.io/master' to
|
||||
'node-role.kubernetes.io/control-plane', in line with upstream. Starting
|
||||
from v1.28, the old taint no longer passes conformance.
|
||||
New clusters from existing cluster templates will have this change.
|
||||
Existing clusters are not affected.
|
||||
This will be a breaking change for Kubernetes <v1.20, which is EOL and not
|
||||
supported in this version of Magnum.
|
|
@ -53,7 +53,7 @@ manifests = manifests.replace(
|
|||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
""").replace(
|
||||
"""
|
||||
- --csi-address=/csi/csi.sock
|
||||
|
|
Loading…
Reference in New Issue