Merge "Provide additional flexibility for the calico chart"

Zuul 2018-02-06 22:10:21 +00:00 committed by Gerrit Code Review
commit 27c46b5603
24 changed files with 1262 additions and 108 deletions


@@ -0,0 +1,85 @@
#!/bin/sh
set -eux
{{ if empty .Values.conf.node.CALICO_IPV4POOL_CIDR }}
{{ set .Values.conf.node "CALICO_IPV4POOL_CIDR" .Values.networking.podSubnet | quote | trunc 0 }}
{{ end }}
# An idempotent script for interacting with calicoctl to instantiate
# peers and manipulate calico settings that must be applied
# post-deployment.
CALICOCTL=/calicoctl
#####################################################
### process mesh and other cluster wide settings ###
#####################################################
# get nodeToNodeMesh value
MESH_VALUE=$(${CALICOCTL} config get nodeToNodeMesh)
# update if necessary
if [ "$MESH_VALUE" != "{{.Values.networking.settings.mesh}}" ];
then
$CALICOCTL config set nodeToNodeMesh {{.Values.networking.settings.mesh}}
fi;
# get asnumber value
AS_VALUE=$(${CALICOCTL} config get asNumber)
# update if necessary
if [ "$AS_VALUE" != "{{.Values.networking.bgp.asnumber}}" ];
then
$CALICOCTL config set asnumber {{.Values.networking.bgp.asnumber}}
fi;
#######################################################
### process ippools ###
#######################################################
# for posterity and logging
${CALICOCTL} get ipPool -o yaml
# ideally, we would support more than one pool
# and this would be a simple toYaml, but we want to
# avoid operators having to spell out the podSubnet again
# or do any hackish replacement
#
# the downside here is that this embedded template
# will likely break when applied against calico v3
cat <<EOF | ${CALICOCTL} apply -f -
# process nat/ipip settings
apiVersion: v1
kind: ipPool
metadata:
cidr: {{.Values.conf.node.CALICO_IPV4POOL_CIDR}}
spec:
ipip:
enabled: {{.Values.networking.settings.ippool.ipip.enabled}}
mode: {{.Values.networking.settings.ippool.ipip.mode}}
nat-outgoing: {{.Values.networking.settings.ippool.nat_outgoing}}
disabled: {{.Values.networking.settings.ippool.disabled}}
EOF
#######################################################
### bgp peers ###
#######################################################
# for posterity and logging
${CALICOCTL} get bgpPeer -o yaml
# process IPv4 peers
{{ if .Values.networking.bgp.ipv4.peers }}
cat << EOF | ${CALICOCTL} apply -f -
{{ .Values.networking.bgp.ipv4.peers | toYaml }}
EOF
{{ end }}
# process IPv6 peers
{{ if .Values.networking.bgp.ipv6.peers }}
cat << EOF | ${CALICOCTL} apply -f -
{{ .Values.networking.bgp.ipv6.peers | toYaml }}
EOF
{{ end }}
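
For illustration, a hypothetical values override feeding one node-scoped IPv4 peer into the block above; the list is passed verbatim through toYaml to calicoctl apply, mirroring the commented bgpPeer example in values.yaml:

networking:
  bgp:
    ipv4:
      peers:
        - apiVersion: v1
          kind: bgpPeer
          metadata:
            peerIP: 10.1.10.39
            scope: node
            node: hpnode1
          spec:
            asNumber: 64512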


@@ -0,0 +1,52 @@
#!/bin/sh
set -e
# instantiate calicoctl in /opt/cni/bin, including
# a wrapper around the bin that points to the correct
# etcd endpoint and etcd certificate data
cp /calicoctl /host/opt/cni/bin/calicoctl.bin
chmod +x /host/opt/cni/bin/calicoctl.bin
if [ ! -z "$ETCD_KEY" ];
then
DIR=$(dirname /host/$ETCD_KEY_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_KEY_FILE
$ETCD_KEY
EOF
chmod 600 /host/$ETCD_KEY_FILE
fi;
if [ ! -z "$ETCD_CA_CERT" ];
then
DIR=$(dirname /host/$ETCD_CA_CERT_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_CA_CERT_FILE
$ETCD_CA_CERT
EOF
chmod 600 /host/$ETCD_CA_CERT_FILE
fi;
if [ ! -z "$ETCD_CERT" ];
then
DIR=$(dirname /host/$ETCD_CERT_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_CERT_FILE
$ETCD_CERT
EOF
chmod 600 /host/$ETCD_CERT_FILE
fi;
cat <<EOF>/host/opt/cni/bin/calicoctl
export ETCD_ENDPOINTS=$ETCD_ENDPOINTS
if [ -e $ETCD_KEY_FILE ]; then export ETCD_KEY_FILE=$ETCD_KEY_FILE; fi;
if [ -e $ETCD_CERT_FILE ]; then export ETCD_CERT_FILE=$ETCD_CERT_FILE; fi;
if [ -e $ETCD_CA_CERT_FILE ]; then export ETCD_CA_CERT_FILE=$ETCD_CA_CERT_FILE; fi;
exec /opt/cni/bin/calicoctl.bin \$*
EOF
chmod +x /host/opt/cni/bin/calicoctl
# sleep forever
while true; do sleep 86400; done
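
A sketch of the wrapper this heredoc produces, assuming the chart's default endpoint values (etcd at http://10.96.232.136:6666, certificate paths under /etc/calico/pki); $ETCD_ENDPOINTS and the *_FILE paths are expanded when the file is written, while the escaped \$* survives to runtime:

export ETCD_ENDPOINTS=http://10.96.232.136:6666
if [ -e /etc/calico/pki/key ]; then export ETCD_KEY_FILE=/etc/calico/pki/key; fi;
if [ -e /etc/calico/pki/crt ]; then export ETCD_CERT_FILE=/etc/calico/pki/crt; fi;
if [ -e /etc/calico/pki/ca ]; then export ETCD_CA_CERT_FILE=/etc/calico/pki/ca; fi;
exec /opt/cni/bin/calicoctl.bin $*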


@@ -24,4 +24,8 @@ metadata:
data:
image-repo-sync.sh: |+
{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
install-calicoctl.sh: |+
{{ tuple "bin/_install-calicoctl.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
calico-settings.sh: |+
{{ tuple "bin/_calico-settings.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}


@@ -1,49 +0,0 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.configmap_calico_config }}
{{- $envAll := . }}
{{- if empty .Values.conf.cni_network_config.mtu -}}
{{/*
#NOTE(portdirect): to err on the side of caution we subtract 20 from the physical
# MTU to account for IPIP overhead unless explicitly turned off.
*/}}
{{- if eq .Values.conf.node.CALICO_IPV4POOL_IPIP "off" -}}
{{- set .Values.conf.cni_network_config "mtu" .Values.networking.mtu | quote | trunc 0 -}}
{{- else -}}
{{- set .Values.conf.cni_network_config "mtu" (sub .Values.networking.mtu 20) | quote | trunc 0 -}}
{{- end -}}
{{- end -}}
---
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: http://10.96.232.136:{{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{{ toJson $envAll.Values.conf.cni_network_config | indent 4 }}
{{- end }}


@@ -0,0 +1,71 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.configmap_etc }}
{{- $envAll := . }}
{{- if empty .Values.conf.cni_network_config.mtu -}}
{{/*
#NOTE(portdirect): to err on the side of caution we subtract 20 from the physical
# MTU to account for IPIP overhead unless explicitly turned off.
*/}}
{{- if eq .Values.conf.node.CALICO_IPV4POOL_IPIP "off" -}}
{{- set .Values.conf.cni_network_config "mtu" .Values.networking.mtu | quote | trunc 0 -}}
{{- else -}}
{{- set .Values.conf.cni_network_config "mtu" (sub .Values.networking.mtu 20) | quote | trunc 0 -}}
{{- end -}}
{{- end -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: calico-etc
data:
# we overlay templates found natively in the calico-node container so that we may override
# bgp configuration
bird6.cfg.mesh.template: |+
{{ tuple "etc/bird/_bird6.cfg.mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird6.cfg.no-mesh.template: |+
{{ tuple "etc/bird/_bird6.cfg.no-mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird6_ipam.cfg.template: |+
{{ tuple "etc/bird/_bird6_ipam.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird_aggr.cfg.template: |+
{{ tuple "etc/bird/_bird_aggr.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird.cfg.mesh.template: |+
{{ tuple "etc/bird/_bird.cfg.mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird.cfg.no-mesh.template: |+
{{ tuple "etc/bird/_bird.cfg.no-mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird_ipam.cfg.template: |+
{{ tuple "etc/bird/_bird_ipam.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
custom_filters6.cfg.template: |+
{{ tuple "etc/bird/_custom_filters6.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
custom_filters.cfg.template: |+
{{ tuple "etc/bird/_custom_filters.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
tunl-ip.template: |+
{{ tuple "etc/bird/_tunl-ip.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: {{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
# The CNI network configuration to install on each node.
cni_network_config: |-
{{ toJson $envAll.Values.conf.cni_network_config | indent 4 }}
{{- end }}


@@ -63,6 +63,7 @@ spec:
containers:
- name: calico-etcd
{{ tuple $envAll "calico_etcd" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_etcd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: CALICO_ETCD_IP
valueFrom:
@@ -72,9 +73,9 @@ spec:
- /usr/local/bin/etcd
- --name=calico
- --data-dir=/var/etcd/calico-data
- --advertise-client-urls=http://$CALICO_ETCD_IP:{{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --listen-client-urls=http://0.0.0.0:{{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --listen-peer-urls=http://0.0.0.0:{{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --advertise-client-urls={{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
- --listen-client-urls={{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}://0.0.0.0:{{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --listen-peer-urls={{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}://0.0.0.0:{{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
volumeMounts:
- name: var-etcd
mountPath: /var/etcd


@@ -108,6 +108,7 @@ spec:
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: {{ $serviceAccountName }}
terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.node.timeout | default "30" }}
initContainers:
{{ tuple $envAll .Values.pod_dependency list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
@@ -116,42 +117,34 @@ spec:
# host.
- name: calico-node
{{ tuple $envAll "calico_node" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_node | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
{{ include "helm-toolkit.utils.to_k8s_env_vars" .Values.conf.node | indent 12 }}
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
name: calico-etc
key: etcd_endpoints
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{ include "helm-toolkit.utils.to_k8s_env_vars" .Values.conf.node | indent 12 }}
{{ if .Values.endpoints.etcd.auth.client.tls.ca}}
- name: ETCD_CA_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.key}}
- name: ETCD_KEY_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.key }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.crt}}
- name: ETCD_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.crt }}
{{ end }}
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
@@ -159,29 +152,132 @@ spec:
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /etc/calico/confd/templates/bird6.cfg.mesh.template
name: calico-etc
subPath: bird6.cfg.mesh.template
- mountPath: /etc/calico/confd/templates/bird6.cfg.no-mesh.template
name: calico-etc
subPath: bird6.cfg.no-mesh.template
- mountPath: /etc/calico/confd/templates/bird6_ipam.cfg.template
name: calico-etc
subPath: bird6_ipam.cfg.template
- mountPath: /etc/calico/confd/templates/bird_aggr.cfg.template
name: calico-etc
subPath: bird_aggr.cfg.template
- mountPath: /etc/calico/confd/templates/bird.cfg.mesh.template
name: calico-etc
subPath: bird.cfg.mesh.template
- mountPath: /etc/calico/confd/templates/bird.cfg.no-mesh.template
name: calico-etc
subPath: bird.cfg.no-mesh.template
- mountPath: /etc/calico/confd/templates/bird_ipam.cfg.template
name: calico-etc
subPath: bird_ipam.cfg.template
- mountPath: /etc/calico/confd/templates/custom_filters6.cfg.template
name: calico-etc
subPath: custom_filters6.cfg.template
- mountPath: /etc/calico/confd/templates/custom_filters.cfg.template
name: calico-etc
subPath: custom_filters.cfg.template
- mountPath: /etc/calico/confd/templates/tunl-ip.template
name: calico-etc
subPath: tunl-ip.template
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.ca }}
subPath: tls.ca
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.crt }}
subPath: tls.crt
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.key }}
subPath: tls.key
readOnly: true
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{ .Values.images.tags.calico_cni }}
{{ tuple $envAll "calico_cni" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_cni | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
name: calico-etc
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
name: calico-etc
key: cni_network_config
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-net-dir
mountPath: /host/etc/cni/net.d
{{ if .Values.manifests.daemonset_calico_node_calicoctl }}
- name: install-calicoctl
{{ tuple $envAll "calico_ctl" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_ctl | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
command:
- /tmp/install-calicoctl.sh
env:
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-etc
key: etcd_endpoints
{{ if .Values.endpoints.etcd.auth.client.tls.ca}}
- name: ETCD_CA_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
- name: ETCD_CA_CERT
valueFrom:
secretKeyRef:
name: calico-certificates
key: tls.ca
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.key}}
- name: ETCD_KEY_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.key }}
- name: ETCD_KEY
valueFrom:
secretKeyRef:
name: calico-certificates
key: tls.key
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.crt}}
- name: ETCD_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.crt }}
- name: ETCD_CERT
valueFrom:
secretKeyRef:
name: calico-certificates
key: tls.crt
{{ end }}
volumeMounts:
- mountPath: /host/etc/calico
name: calico-cert-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /tmp/install-calicoctl.sh
name: calico-bin
subPath: install-calicoctl.sh
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.ca }}
subPath: tls.ca
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.crt }}
subPath: tls.crt
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.key }}
subPath: tls.key
readOnly: true
{{ end }}
volumes:
# Used by calico/node.
- name: lib-modules
@@ -197,4 +293,18 @@ spec:
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: calico-cert-dir
hostPath:
path: /etc/calico
- name: calico-etc
configMap:
name: calico-etc
defaultMode: 0444
- name: calico-bin
configMap:
name: calico-bin
defaultMode: 0555
- name: calico-certificates
secret:
secretName: calico-certificates
{{- end }}
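
With manifests.daemonset_calico_node_calicoctl enabled, the wrapper installed above is usable directly on any node; the same subcommands the settings job runs work from the host, for example:

/opt/cni/bin/calicoctl config get nodeToNodeMesh
/opt/cni/bin/calicoctl get ipPool -o yaml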


@@ -14,15 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.deployment_calico_kube_controllers }}
{{- if .Values.manifests.deployment_calico_kube_policy_controllers }}
{{- $envAll := . }}
{{- if .Values.images.local_registry.active -}}
{{- $_ := set .Values "pod_dependency" (merge .Values.dependencies.calico_kube_controllers .Values.conditional_dependencies.local_image_registry) -}}
{{- $_ := set .Values "pod_dependency" (merge .Values.dependencies.calico_kube_policy_controllers .Values.conditional_dependencies.local_image_registry) -}}
{{- else -}}
{{- $_ := set .Values "pod_dependency" .Values.dependencies.calico_kube_controllers -}}
{{- $_ := set .Values "pod_dependency" .Values.dependencies.calico_kube_policy_controllers -}}
{{- end -}}
{{- $serviceAccountName := "calico-kube-controllers"}}
{{- $serviceAccountName := "calico-kube-policy-controllers"}}
{{ tuple $envAll $envAll.Values.pod_dependency $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -60,22 +60,23 @@ rules:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
name: calico-kube-policy-controllers
namespace: {{ .Release.Namespace }}
labels:
k8s-app: calico-kube-controllers
k8s-app: calico-kube-policy-controllers
{{ tuple $envAll "calico" "kube-controller" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
template:
metadata:
name: calico-kube-controllers
name: calico-kube-policy-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
k8s-app: calico-kube-policy-controllers
{{ tuple $envAll "calico" "kube-controller" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
@@ -101,26 +102,46 @@ spec:
serviceAccountName: {{ $serviceAccountName }}
initContainers:
{{ tuple $envAll .Values.pod_dependency list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.policy_controller.timeout | default "30" }}
containers:
- name: calico-kube-controllers
{{ tuple $envAll "calico_kube_controllers" | include "helm-toolkit.snippets.image" | indent 10 }}
- name: calico-policy-controller
{{ tuple $envAll "calico_kube_policy_controller" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_kube_policy_controller | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
name: calico-etc
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,profile,workloadendpoint,node
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
{{ include "helm-toolkit.utils.to_k8s_env_vars" .Values.conf.policy_controller | indent 12 }}
{{ if .Values.endpoints.etcd.auth.client.tls.ca}}
- name: ETCD_CA_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.key}}
- name: ETCD_KEY_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.key }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.crt}}
- name: ETCD_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.crt }}
{{ end }}
volumeMounts:
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.ca }}
subPath: tls.ca
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.crt }}
subPath: tls.crt
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.key }}
subPath: tls.key
readOnly: true
volumes:
- name: calico-certificates
secret:
secretName: calico-certificates
{{- end }}


@@ -0,0 +1,105 @@
# Generated by confd
include "bird_aggr.cfg";
include "custom_filters.cfg";
include "bird_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
# ensure we only listen on a specific ip address and port
listen bgp address {{`{{$node_ip}}`}} port {{.Values.networking.bgp.ipv4.mesh.port.listen}};
router id {{`{{$node_ip}}`}};
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export filter calico_ipip; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Node-to-node mesh -------------
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
{{`{{$onode_ip_key := printf "/host/%s/ip_addr_v4" .}}`}}{{`{{if exists $onode_ip_key}}`}}{{`{{$onode_ip := getv $onode_ip_key}}`}}
{{`{{$nums := split $onode_ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{$onode_ip_key}}`}}
{{`{{if eq $onode_ip ($node_ip) }}`}}# Skipping ourselves ({{`{{$node_ip}}`}})
{{`{{else if ne "" $onode_ip}}`}}protocol bgp Mesh_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$onode_ip}}`}} as {{`{{if exists $onode_as_key}}`}}{{`{{getv $onode_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
neighbor port {{.Values.networking.bgp.ipv4.mesh.port.neighbor}};
}{{`{{end}}`}}{{`{{end}}`}}{{`{{end}}`}}
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v4"}}`}}
{{`{{range gets "/global/peer_v4/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v4" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}


@@ -0,0 +1,89 @@
# Generated by confd
include "bird_aggr.cfg";
include "custom_filters.cfg";
include "bird_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
# ensure we only listen on a specific ip address and port
listen bgp address {{`{{$node_ip}}`}} port {{.Values.networking.bgp.ipv4.no_mesh.port.listen}};
router id {{`{{$node_ip}}`}};
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export filter calico_ipip; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v4"}}`}}
{{`{{range gets "/global/peer_v4/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v4" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}


@@ -0,0 +1,109 @@
# Generated by confd
include "bird6_aggr.cfg";
include "custom_filters6.cfg";
include "bird6_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
{{`{{$node_ip6_key := printf "/host/%s/ip_addr_v6" (getenv "NODENAME")}}`}}{{`{{$node_ip6 := getv $node_ip6_key}}`}}
# ensure we only listen on a specific ip address and port
listen bgp address {{`{{$node_ip6}}`}} port {{.Values.networking.bgp.ipv6.mesh.port.listen}};
router id {{`{{$node_ip}}`}}; # Use IPv4 address since router id is 4 octets, even in MP-BGP
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export all; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{if eq "" ($node_ip6)}}`}}# IPv6 disabled on this node.
{{`{{else}}`}}{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip6}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Node-to-node mesh -------------
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
{{`{{$onode_ip_key := printf "/host/%s/ip_addr_v6" .}}`}}{{`{{if exists $onode_ip_key}}`}}{{`{{$onode_ip := getv $onode_ip_key}}`}}
{{`{{$nums := split $onode_ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{$onode_ip_key}}`}}
{{`{{if eq $onode_ip ($node_ip6) }}`}}# Skipping ourselves ({{`{{$node_ip6}}`}})
{{`{{else if eq "" $onode_ip}}`}}# No IPv6 address configured for this node
{{`{{else}}`}}protocol bgp Mesh_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$onode_ip}}`}} as {{`{{if exists $onode_as_key}}`}}{{`{{getv $onode_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
neighbor port {{.Values.networking.bgp.ipv6.mesh.port.neighbor}};
}{{`{{end}}`}}{{`{{end}}`}}{{`{{end}}`}}
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v6"}}`}}
{{`{{range gets "/global/peer_v6/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v6" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}
{{`{{end}}`}}


@@ -0,0 +1,92 @@
# Generated by confd
include "bird6_aggr.cfg";
include "custom_filters6.cfg";
include "bird6_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
{{`{{$node_ip6_key := printf "/host/%s/ip_addr_v6" (getenv "NODENAME")}}`}}{{`{{$node_ip6 := getv $node_ip6_key}}`}}
# ensure we only listen on a specific ip address and port
listen bgp address {{`{{$node_ip6}}`}} port {{.Values.networking.bgp.ipv6.no_mesh.port.listen}};
router id {{`{{$node_ip}}`}}; # Use IPv4 address since router id is 4 octets, even in MP-BGP
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export all; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{if eq "" ($node_ip6)}}`}}# IPv6 disabled on this node.
{{`{{else}}`}}{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip6}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v6"}}`}}
{{`{{range gets "/global/peer_v6/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v6" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}
{{`{{end}}`}}


@@ -0,0 +1,11 @@
# Generated by confd
filter calico_pools {
calico_aggr();
custom_filters();
{{`{{range ls "/pool"}}`}}{{`{{$data := json (getv (printf "/pool/%s" .))}}`}}
if ( net ~ {{`{{$data.cidr}}`}} ) then {
accept;
}
{{`{{end}}`}}
reject;
}


@@ -0,0 +1,22 @@
# Generated by confd
# ------------- Static black hole addresses -------------
{{`{{if ls "/"}}`}}
protocol static {
{{`{{range ls "/"}}`}}
{{`{{$parts := split . "-"}}`}}
{{`{{$cidr := join $parts "/"}}`}}
route {{`{{$cidr}}`}} blackhole;
{{`{{end}}`}}
}
{{`{{else}}`}}# No static routes configured.{{`{{end}}`}}
# Aggregation of routes on this host; export the block, nothing beneath it.
function calico_aggr ()
{
{{`{{range ls "/"}}`}}
{{`{{$parts := split . "-"}}`}}
{{`{{$cidr := join $parts "/"}}`}}
if ( net = {{`{{$cidr}}`}} ) then { accept; }
if ( net ~ {{`{{$cidr}}`}} ) then { reject; }
{{`{{end}}`}}
}


@@ -0,0 +1,32 @@
# Generated by confd
filter calico_pools {
calico_aggr();
custom_filters();
{{`{{range ls "/v1/ipam/v4/pool"}}`}}{{`{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}`}}
if ( net ~ {{`{{$data.cidr}}`}} ) then {
accept;
}
{{`{{end}}`}}
reject;
}
{{`{{$network_key := printf "/bgp/v1/host/%s/network_v4" (getenv "NODENAME")}}`}}{{`{{$network := getv $network_key}}`}}
filter calico_ipip {
{{`{{range ls "/v1/ipam/v4/pool"}}`}}{{`{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}`}}
if ( net ~ {{`{{$data.cidr}}`}} ) then {
{{`{{if $data.ipip_mode}}`}}{{`{{if eq $data.ipip_mode "cross-subnet"}}`}}
if ( from ~ {{`{{$network}}`}} ) then
krt_tunnel = ""; {{`{{/* Destination in ipPool, mode is cross sub-net, route from-host on subnet, do not use IPIP */}}`}}
else
krt_tunnel = "{{`{{$data.ipip}}`}}"; {{`{{/* Destination in ipPool, mode is cross sub-net, route from-host off subnet, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
accept;
} {{`{{else}}`}}
krt_tunnel = "{{`{{$data.ipip}}`}}"; {{`{{/* Destination in ipPool, mode not cross sub-net, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
accept;
} {{`{{end}}`}} {{`{{else}}`}}
krt_tunnel = "{{`{{$data.ipip}}`}}"; {{`{{/* Destination in ipPool, mode field is not present, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
accept;
} {{`{{end}}`}}
{{`{{end}}`}}
accept; {{`{{/* Destination is not in any ipPool, accept */}}`}}
}


@@ -0,0 +1,13 @@
# Generated by confd
function custom_filters ()
{
{{`{{range ls "/v4"}}`}}{{`{{$data := getv (printf "/v4/%s" .)}}`}}
{{`{{ $data }}`}}
{{`{{end}}`}}
# support any addresses matching our secondary announcements
{{ range .Values.networking.bgp.ipv4.additional_cidrs }}
if ( net ~ {{ . }} ) then { accept; }
{{ end }}
}


@@ -0,0 +1,13 @@
# Generated by confd
function custom_filters ()
{
{{`{{range ls "/v6"}}`}}{{`{{$data := getv (printf "/v6/%s" .)}}`}}
{{`{{ $data }}`}}
{{`{{end}}`}}
# support any addresses matching our secondary announcements
{{ range .Values.networking.bgp.ipv6.additional_cidrs }}
if ( net ~ {{ . }} ) then { accept; }
{{ end }}
}


@@ -0,0 +1,7 @@
We must dump all pool data to this file to trigger a resync.
Otherwise, confd notices the file hasn't changed and won't
run our python update script.
{{`{{range ls "/pool"}}`}}{{`{{$data := json (getv (printf "/pool/%s" .))}}`}}
{{`{{if $data.ipip}}`}}{{`{{if not $data.disabled}}`}}{{`{{$data.cidr}}`}}{{`{{end}}`}}{{`{{end}}`}}
{{`{{end}}`}}


@@ -0,0 +1,100 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.job_calico_settings }}
{{- $envAll := . }}
{{- $_ := set .Values "pod_dependency" .Values.dependencies.calico_settings -}}
{{- $serviceAccountName := "calico-settings"}}
{{ tuple $envAll $envAll.Values.pod_dependency $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: calico-settings
spec:
template:
metadata:
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
{{ tuple $envAll "calico" "calico_settings" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
initContainers:
{{ tuple $envAll .Values.pod_dependency list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
- name: calico-settings
{{ tuple $envAll "calico_settings" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_settings | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-etc
key: etcd_endpoints
{{ if .Values.endpoints.etcd.auth.client.tls.ca}}
- name: ETCD_CA_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.key}}
- name: ETCD_KEY_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.key }}
{{ end }}
{{ if .Values.endpoints.etcd.auth.client.tls.crt}}
- name: ETCD_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.crt }}
{{ end }}
command:
- /tmp/calico-settings.sh
volumeMounts:
- name: calico-bin
mountPath: /tmp/calico-settings.sh
subPath: calico-settings.sh
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.ca }}
subPath: tls.ca
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.crt }}
subPath: tls.crt
readOnly: true
- name: calico-certificates
mountPath: {{ .Values.endpoints.etcd.auth.client.path.key }}
subPath: tls.key
readOnly: true
volumes:
- name: calico-bin
configMap:
name: calico-bin
defaultMode: 0555
- name: calico-certificates
secret:
secretName: calico-certificates
{{- end }}


@@ -0,0 +1,31 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.secret_certificates }}
{{- $envAll := . }}
---
apiVersion: v1
kind: Secret
metadata:
name: calico-certificates
type: kubernetes.io/tls
data:
tls.ca: {{ .Values.endpoints.etcd.auth.client.tls.ca | default "" | b64enc }}
tls.key: {{ .Values.endpoints.etcd.auth.client.tls.key | default "" | b64enc }}
tls.crt: {{ .Values.endpoints.etcd.auth.client.tls.crt | default "" | b64enc }}
{{ end }}
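
A minimal sketch of feeding real certificate material into this secret via values overrides (the PEM bodies below are placeholders; the mount paths are set separately by endpoints.etcd.auth.client.path):

endpoints:
  etcd:
    auth:
      client:
        tls:
          ca: |
            -----BEGIN CERTIFICATE-----
            ...
          crt: |
            -----BEGIN CERTIFICATE-----
            ...
          key: |
            -----BEGIN RSA PRIVATE KEY-----
            ...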


@@ -29,7 +29,10 @@ images:
calico_etcd: quay.io/coreos/etcd:v3.1.10
calico_node: quay.io/calico/node:v2.6.5
calico_cni: quay.io/calico/cni:v1.11.2
calico_kube_controllers: quay.io/calico/kube-controllers:v1.0.2
calico_cni: quay.io/calico/cni:v1.10.0
calico_ctl: quay.io/calico/ctl:v1.6.2
calico_settings: quay.io/calico/ctl:v1.6.2
calico_kube_policy_controller: quay.io/calico/kube-policy-controller:v0.7.0
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.2.1
image_repo_sync: docker.io/docker:17.07.0
pull_policy: IfNotPresent
@@ -54,6 +57,64 @@ pod:
limits:
memory: "1024Mi"
cpu: "2000m"
calico_settings:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
calico_kube_policy_controller:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
calico_node:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
calico_cni:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
calico_ctl:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
calico_etcd:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
lifecycle:
upgrades:
deployments:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
disruption_budget:
policy_controller:
min_available: 0
termination_grace_period:
policy_controller:
timeout: 5
node:
timeout: 5
dependencies:
etcd:
@@ -62,7 +123,11 @@ dependencies:
services:
- service: etcd
endpoint: internal
calico_kube_controllers:
calico_settings:
services:
- service: etcd
endpoint: internal
calico_kube_policy_controllers:
services:
- service: etcd
endpoint: internal
@@ -75,6 +140,7 @@ conditional_dependencies:
- service: local_image_registry
endpoint: node
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
@@ -90,10 +156,27 @@ endpoints:
registry:
node: 5000
etcd:
auth:
client:
tls:
crt: null
ca: null
key: null
path:
# these must be within /etc/calico
crt: /etc/calico/pki/crt
ca: /etc/calico/pki/ca
key: /etc/calico/pki/key
scheme:
default: http
path:
default: ' ' # space required to provide a truly empty path
hosts:
default: calico-etcd
default: 10.96.232.136
host_fqdn_override:
default: null
service:
name: null
port:
client:
default: 6666
@@ -112,8 +195,79 @@ networking:
#NOTE(portdirect): this should be the physical MTU; the appropriate MTU
# that calico should use will be calculated.
mtu: 1500
settings:
mesh: "on"
# technically this could be a list; today we only support
# a single podSubnet (the one above). The settings below
# will be applied to that ipPool.
ippool:
ipip:
enabled: "true"
mode: "always"
nat_outgoing: "true"
disabled: "false"
bgp:
# our asnumber for bgp peering
asnumber: 64512
ipv4:
# this is a list of peer objects that will be passed
# directly to calicoctl - for global peers, the scope
# should be global and the node attribute removed
#
# apiVersion: v1
# kind: bgpPeer
# metadata:
# peerIP: 10.1.10.39
# scope: node
# node: hpnode1
# spec:
# asNumber: 64512
peers: []
# this is a list of additional IPv4 cidrs; if we
# discover IPs within them on a host, we will announce
# those addresses in addition to traditional pod workloads
additional_cidrs: []
mesh:
port:
neighbor: 179
listen: 179
no_mesh:
port:
neighbor: 179
listen: 179
ipv6:
# this is a list of peer objects that will be passed
# directly to calicoctl - for global peers, the scope
# should be global and the node attribute removed
#
# apiVersion: v1
# kind: bgpPeer
# metadata:
# peerIP: 2603:3024:1200:7500:7011:1dd6:1462:fa5b
# scope: node
# node: hpnode1
# spec:
# asNumber: 64512
peers: []
# this is a list of additional IPv6 cidrs; if we
# discover IPs within them on a host, we will announce
# those addresses in addition to traditional pod workloads
additional_cidrs: []
mesh:
port:
neighbor: 179
listen: 179
no_mesh:
port:
neighbor: 179
listen: 179
conf:
etcd:
credentials:
ca: null
key: null
certificate: null
cni_network_config:
name: k8s-pod-network
cniVersion: 0.1.0
@@ -129,11 +283,31 @@ conf:
k8s_auth_token: __SERVICEACCOUNT_TOKEN__
kubernetes:
kubeconfig: "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
policy_controller:
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
K8S_API: "https://kubernetes.default:443"
# Choose which controllers to run.
ENABLED_CONTROLLERS: "policy,profile,workloadendpoint,node"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
CONFIGURE_ETC_HOSTS: "true"
node:
# Cluster type to identify the deployment type
CLUSTER_TYPE:
- kubeadm
- bgp
# Describes which BGP networking backend to use: gobgp, bird, or none. Default is bird.
# NOTE(alanmeadows) today this chart only supports applying the bgp customizations to
# bird templates - in the future we may support gobgp as well
CALICO_NETWORKING_BACKEND: bird
# Location of the CA certificate for etcd.
ETCD_CA_CERT_FILE: ""
# Location of the client key for etcd.
ETCD_KEY_FILE: ""
# Location of the client certificate for etcd.
ETCD_CERT_FILE: ""
# Disable file logging so `kubectl logs` works.
CALICO_DISABLE_FILE_LOGGING: "true"
# Set Felix endpoint to host default action to ACCEPT.
@@ -154,12 +328,21 @@ conf:
FELIX_PROMETHEUSMETRICSPORT: "9091"
# Auto-detect the BGP IP address.
IP: ""
# Detection of the source interface for routing;
# options include:
# can-reach=DESTINATION
# interface=INTERFACE-REGEX
IP_AUTODETECTION_METHOD: first-found
IPV6_AUTODETECTION_METHOD: first-found
manifests:
configmap_bin: true
configmap_calico_config: true
configmap_etc: true
daemonset_calico_etcd: true
daemonset_calico_node: true
deployment_calico_kube_controllers: true
daemonset_calico_node_calicoctl: true
deployment_calico_kube_policy_controllers: true
job_image_repo_sync: true
job_calico_settings: true
service_calico_etcd: true
secret_certificates: true


@@ -29,7 +29,11 @@ limitations under the License.
{{- with $endpointMap -}}
{{- $endpointScheme := .scheme }}
{{- $endpointHost := index .hosts $endpoint | default .hosts.default}}
{{- if regexMatch "[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+" $endpointHost }}
{{- printf "%s" $typeYamlSafe -}}
{{- else }}
{{- $endpointHostname := printf "%s" $endpointHost }}
{{- printf "%s" $endpointHostname -}}
{{- end }}
{{- end -}}
{{- end -}}


@@ -0,0 +1,34 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# This function returns the scheme for a service. It takes a tuple
# input in the form: service-type, endpoint-class, port-name. eg:
# { tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }
# will return the scheme setting for this particular endpoint. In other words, for most endpoints
# it will return either 'http' or 'https'
{{- define "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" -}}
{{- $type := index . 0 -}}
{{- $endpoint := index . 1 -}}
{{- $port := index . 2 -}}
{{- $context := index . 3 -}}
{{- $typeYamlSafe := $type | replace "-" "_" }}
{{- $endpointMap := index $context.Values.endpoints $typeYamlSafe }}
{{- with $endpointMap -}}
{{- $endpointScheme := index .scheme $endpoint | default .scheme.default | default "http" }}
{{- printf "%s" $endpointScheme -}}
{{- end -}}
{{- end -}}


@@ -18,6 +18,12 @@ limitations under the License.
# definition. This is used in kubernetes-entrypoint to support dependencies
# between different services in different namespaces.
# returns: the endpoint namespace and the service name, delimited by a colon
#
# Normally, the service name is constructed dynamically from the hostname.
# However, when an IP address is used as the hostname, we default to
# namespace:endpointCategoryName in order to construct a valid service name.
# This can be overridden with a custom service name by defining
# .service.name within the endpoint definition.
{{- define "helm-toolkit.endpoints.service_name_endpoint_with_namespace_lookup" -}}
{{- $type := index . 0 -}}
@@ -29,6 +35,14 @@ limitations under the License.
{{- $endpointScheme := .scheme }}
{{- $endpointName := index .hosts $endpoint | default .hosts.default}}
{{- $endpointNamespace := .namespace | default $context.Release.Namespace }}
{{- if regexMatch "[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+" $endpointName }}
{{- if .service.name }}
{{- printf "%s:%s" $endpointNamespace .service.name -}}
{{- else -}}
{{- printf "%s:%s" $endpointNamespace $typeYamlSafe -}}
{{- end -}}
{{- else -}}
{{- printf "%s:%s" $endpointNamespace $endpointName -}}
{{- end -}}
{{- end -}}
{{- end -}}
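
A sketch of the resulting behaviour with this chart's defaults: hosts.default for etcd is the IP 10.96.232.136, so the regexMatch branch fires and the lookup returns a valid service name instead of the raw address, e.g. (assuming a kube-system release namespace):

{{ tuple "etcd" "internal" . | include "helm-toolkit.endpoints.service_name_endpoint_with_namespace_lookup" }}
# => "kube-system:etcd", or "kube-system:<custom>" when endpoints.etcd.service.name is set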