# k8s-docker-suite-app-murano/Kubernetes/KubernetesCluster/package/Resources/scripts/addons/kube-dns-addon.yaml
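#
# Cluster DNS addon for the Murano-deployed Kubernetes cluster. It defines two
# objects: the kube-dns Service (pinned at clusterIP 10.32.0.10) and the
# kube-dns-v20 Deployment that runs the DNS pod. The addon is normally loaded
# by the cluster setup scripts; applying it by hand would look something like
# `kubectl create -f kube-dns-addon.yaml` (an assumption -- the exact
# invocation is not part of this file).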

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.32.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
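# The clusterIP above is fixed rather than auto-allocated so that kubelets can
# point --cluster-dns at 10.32.0.10 before the Service exists; this is assumed
# to match the kubelet configuration used by the cluster scripts and is not
# set in this file.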
---
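# The DNS pod runs three containers: kubedns (watches the API server and
# answers DNS on port 10053), dnsmasq (caching front end on port 53), and
# exechealthz (serves the health endpoints on port 8080 used by the liveness
# probes below).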
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns-v20
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v20
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
      version: v20
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v20
        kubernetes.io/cluster-service: "true"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
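        # The critical-pod annotation and the CriticalAddonsOnly toleration
        # above mark this pod as a critical cluster addon: the toleration lets
        # it schedule onto nodes tainted CriticalAddonsOnly, and the (alpha)
        # annotation asks the rescheduler to make room for it on a full node.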
    spec:
      volumes:
      - name: kubeconfig
        hostPath:
          path: /etc/kubernetes/kubeconfig.yaml
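      # The node's kubeconfig is mounted read-only into the kubedns container
      # (see volumeMounts below) so it can authenticate to the API server and
      # watch Services and Endpoints.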
      containers:
      - name: kubedns
        image: gcr.io/google_containers/kubedns-amd64:1.8
        resources:
          # TODO: Set memory limits when we've profiled the container for
          # large clusters, then set request = limit to keep this container
          # in guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from
          # restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        - --domain=kubernetes.local
        - --dns-port=10053
        - --kubecfg-file=/etc/kubernetes/kubeconfig.yaml
        volumeMounts:
        - mountPath: /etc/kubernetes/kubeconfig.yaml
          name: kubeconfig
          readOnly: true
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
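      # dnsmasq fronts kubedns: it serves the pod's port 53 and forwards
      # lookups to kubedns at 127.0.0.1#10053 (--server below) with a
      # 1000-entry cache; --no-resolv keeps it from reading /etc/resolv.conf.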
      - name: dnsmasq
        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
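      # exechealthz runs the nslookup commands below and reports the results
      # over HTTP on port 8080: /healthz-dnsmasq checks dnsmasq on port 53 and
      # /healthz-kubedns checks kubedns on port 10053, which is what the
      # liveness probes of the other two containers poll.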
      - name: healthz
        image: gcr.io/google_containers/exechealthz-amd64:1.2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory.
            # The limits are set higher than expected pending investigation
            # on #29688. The extra memory was stolen from the kubedns
            # container to keep the net memory requested by the pod constant.
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.kubernetes.local 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.kubernetes.local 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
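      # With dnsPolicy Default the DNS pod itself resolves names through the
      # node's resolver configuration rather than through the cluster DNS it
      # provides, so upstream lookups are not routed back into this pod.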
      dnsPolicy: Default  # Don't use cluster DNS.