[Calico] Update to Calico v3.2

Change-Id: I2214fea8d8c1563b08c4015c9e91a29cf071af5a
Chris Wedgwood 2018-10-01 23:30:18 +00:00
parent 46935734af
commit 26e1b9cde6
20 changed files with 424 additions and 638 deletions

View File

@ -13,7 +13,7 @@
# limitations under the License.
apiVersion: v1
description: OpenStack-Helm BootStrap Calico
description: OpenStack-Helm Calico
name: calico
version: 0.1.0
home: https://github.com/projectcalico/calico

View File

@ -10,76 +10,61 @@ set -eux
# peers, and manipulate calico settings that we must perform
# post-deployment.
CALICOCTL=/calicoctl
CTL=/calicoctl
#####################################################
### process mesh and other cluster wide settings ###
#####################################################
# Generate the configuration the way we want it; it doesn't matter if
# it's already set, in which case Calico will do nothing.
# get nodeToNodeMesh value
MESH_VALUE=$(${CALICOCTL} config get nodeToNodeMesh)
# update if necessary
if [ "$MESH_VALUE" != "{{.Values.networking.settings.mesh}}" ];
then
$CALICOCTL config set nodeToNodeMesh {{.Values.networking.settings.mesh}}
fi;
# get asnumber value
AS_VALUE=$(${CALICOCTL} config get asNumber)
# update if necessary
if [ "$AS_VALUE" != "{{.Values.networking.bgp.asnumber}}" ];
then
$CALICOCTL config set asnumber {{.Values.networking.bgp.asnumber}}
fi;
#######################################################
### process ippools ###
#######################################################
# for posterity and logging
${CALICOCTL} get ipPool -o yaml
# ideally, we would support more than one pool
# and this would be a simple toYaml, but we want to
# avoid them having to spell out the podSubnet again
# or do any hackish replacement
#
# the downside here is that this embedded template
# will likely break when applied against calico v3
cat <<EOF | ${CALICOCTL} apply -f -
# process nat/ipip settings
apiVersion: v1
kind: ipPool
# BGPConfiguration: nodeToNodeMeshEnabled & asNumber
$CTL apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
cidr: {{.Values.conf.node.CALICO_IPV4POOL_CIDR}}
name: default
spec:
ipip:
enabled: {{.Values.networking.settings.ippool.ipip.enabled}}
mode: {{.Values.networking.settings.ippool.ipip.mode}}
nat-outgoing: {{.Values.networking.settings.ippool.nat_outgoing}}
disabled: {{.Values.networking.settings.ippool.disabled}}
logSeverityScreen: Info
nodeToNodeMeshEnabled: {{ .Values.networking.settings.mesh }}
asNumber: {{ .Values.networking.bgp.asnumber }}
EOF
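# for posterity and logging (illustrative addition; calicoctl v3 syntax)
$CTL get bgpconfiguration default -o yaml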
#######################################################
### bgp peers ###
#######################################################
# FelixConfiguration: ipipEnabled
$CTL apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: FelixConfiguration
metadata:
name: default
spec:
ipipEnabled: {{ .Values.networking.settings.ippool.ipip.enabled }}
logSeverityScreen: Info
EOF
# for posterity and logging
${CALICOCTL} get bgpPeer -o yaml
# ipPool - https://docs.projectcalico.org/v3.2/reference/calicoctl/resources/ippool
$CTL apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
name: default-ipv4-ippool
spec:
cidr: {{ .Values.conf.node.CALICO_IPV4POOL_CIDR }}
ipipMode: {{ .Values.networking.settings.ippool.ipip.mode }}
natOutgoing: {{ .Values.networking.settings.ippool.nat_outgoing }}
disabled: {{ .Values.networking.settings.ippool.disabled }}
EOF
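# for posterity and logging (illustrative addition; mirrors the old ipPool dump)
$CTL get ippool -o yaml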
# process IPv4 peers
# IPv4 peers
{{ if .Values.networking.bgp.ipv4.peers }}
cat << EOF | ${CALICOCTL} apply -f -
$CTL apply -f - <<EOF
{{ .Values.networking.bgp.ipv4.peers | toYaml }}
EOF
{{ end }}
# process IPv6 peers
# IPv6 peers
{{ if .Values.networking.bgp.ipv6.peers }}
cat << EOF | ${CALICOCTL} apply -f -
$CTL apply -f - <<EOF
{{ .Values.networking.bgp.ipv6.peers | toYaml }}
EOF
{{ end }}
exit 0

View File

@ -2,48 +2,54 @@
set -e
# instantiate calicoctl in /opt/cni/bin, including
# a wrapper around the bin that points to the correct
# etcd endpoint and etcd certificate data
cp /calicoctl /host/opt/cni/bin/calicoctl.bin
chmod +x /host/opt/cni/bin/calicoctl.bin
# instantiate calicoctl in /opt/cni/bin, including a wrapper around
# the bin that points to the correct etcd endpoint and etcd
# certificate data
cp -v /calicoctl /host/opt/cni/bin/calicoctl.bin
[ -x /host/opt/cni/bin/calicoctl.bin ] || chmod +x /host/opt/cni/bin/calicoctl.bin
if [ ! -z "$ETCD_KEY" ];
then
DIR=$(dirname /host/$ETCD_KEY_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_KEY_FILE
if [ ! -z "$ETCD_KEY" ]; then
DIR=$(dirname /host/$ETCD_KEY_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_KEY_FILE
$ETCD_KEY
EOF
chmod 600 /host/$ETCD_KEY_FILE
chmod 600 /host/$ETCD_KEY_FILE
fi;
if [ ! -z "$ETCD_CA_CERT" ];
then
DIR=$(dirname /host/$ETCD_CA_CERT_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_CA_CERT_FILE
if [ ! -z "$ETCD_CA_CERT" ]; then
DIR=$(dirname /host/$ETCD_CA_CERT_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_CA_CERT_FILE
$ETCD_CA_CERT
EOF
chmod 600 /host/$ETCD_CA_CERT_FILE
chmod 600 /host/$ETCD_CA_CERT_FILE
fi;
if [ ! -z "$ETCD_CERT" ];
then
DIR=$(dirname /host/$ETCD_CERT_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_CERT_FILE
if [ ! -z "$ETCD_CERT" ]; then
DIR=$(dirname /host/$ETCD_CERT_FILE)
mkdir -p $DIR
cat <<EOF>/host/$ETCD_CERT_FILE
$ETCD_CERT
EOF
chmod 600 /host/$ETCD_CERT_FILE
chmod 600 /host/$ETCD_CERT_FILE
fi;
# This looks a bit funny: $ETCD_ENDPOINTS and friends are defined in this
# (calico-node initContainer/startup) context, so we generate a shell
# script that sets the same values on the host, where these variables
# will *not* be set.
cat <<EOF>/host/opt/cni/bin/calicoctl
export ETCD_ENDPOINTS=$ETCD_ENDPOINTS
#!/bin/bash
#
# do *NOT* modify this file; this is autogenerated by the calico-node
# deployment startup process
[ -e $ETCD_KEY_FILE ] && export ETCD_KEY_FILE=$ETCD_KEY_FILE
[ -e $ETCD_CERT_FILE ] && export ETCD_CERT_FILE=$ETCD_CERT_FILE
[ -e $ETCD_CA_CERT_FILE ] && export ETCD_CA_CERT_FILE=$ETCD_CA_CERT_FILE
export ETCD_ENDPOINTS="${ETCD_ENDPOINTS}"
[ -e "${ETCD_KEY_FILE}" ] && export ETCD_KEY_FILE="${ETCD_KEY_FILE}"
[ -e "${ETCD_CERT_FILE}" ] && export ETCD_CERT_FILE="${ETCD_CERT_FILE}"
[ -e "${ETCD_CA_CERT_FILE}" ] && export ETCD_CA_CERT_FILE="${ETCD_CA_CERT_FILE}"
exec /opt/cni/bin/calicoctl.bin \$*
EOF
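# (illustrative sketch) once this has run on a node, the generated wrapper
# can be exercised from the host itself, for example:
#   /opt/cni/bin/calicoctl get nodes
#   /opt/cni/bin/calicoctl get ippool -o wide
# both use calicoctl v3 syntax and assume the default CNI bin directory.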

View File

@ -1,89 +0,0 @@
# Generated by confd
include "bird_aggr.cfg";
include "custom_filters.cfg";
include "bird_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
# ensure we only listen on a specific IP address and port
listen bgp address {{`{{$node_ip}}`}} port {{.Values.networking.bgp.ipv4.no_mesh.port.listen}};
router id {{`{{$node_ip}}`}};
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export filter calico_ipip; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v4"}}`}}
{{`{{range gets "/global/peer_v4/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v4" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}

View File

@ -1,20 +1,20 @@
# Generated by confd
include "bird_aggr.cfg";
include "custom_filters.cfg";
include "bird_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}{{$node_ip := getv $node_ip_key}}`}}
# ensure we only listen on a specific IP address and port
listen bgp address {{`{{$node_ip}}`}} port {{.Values.networking.bgp.ipv4.mesh.port.listen}};
listen bgp address {{`{{$node_ip}}`}} port {{.Values.networking.bgp.ipv4.port.listen}};
router id {{`{{$node_ip}}`}};
{{`{{$router_id := getenv "CALICO_ROUTER_ID" ""}}`}}
{{`router id {{if ne "" ($router_id)}}{{$router_id}}{{else}}{{$node_ip}}{{end}};`}}
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}{{if exists $node_logging_key}}{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}} debug all;{{else if ne $logging "none"}} debug { states };{{end}}`}}
{{`{{else if exists "/global/loglevel"}}{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}} debug all;{{else if ne $logging "none"}} debug { states };{{end}}`}}
{{`{{else}} debug { states };{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
@ -34,21 +34,22 @@ protocol kernel {
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
{{` {{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
{{` {{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
{{`{{if eq "" ($node_ip)}}# IPv4 disabled on this node.`}}
{{`{{else}}{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
{{` {{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
{{` local as {{if exists $node_as_key}}{{getv $node_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};`}}
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
@ -56,7 +57,7 @@ template bgp bgp_template {
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip}}`}}; # The local address we use for the TCP connection
{{` source address {{$node_ip}}; # The local address we use for the TCP connection`}}
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
@ -65,14 +66,14 @@ template bgp bgp_template {
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
{{`{{$onode_ip_key := printf "/host/%s/ip_addr_v4" .}}`}}{{`{{if exists $onode_ip_key}}`}}{{`{{$onode_ip := getv $onode_ip_key}}`}}
{{`{{$nums := split $onode_ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{$onode_ip_key}}`}}
{{`{{if eq $onode_ip ($node_ip) }}`}}# Skipping ourselves ({{`{{$node_ip}}`}})
{{`{{else if ne "" $onode_ip}}`}}protocol bgp Mesh_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$onode_ip}}`}} as {{`{{if exists $onode_as_key}}`}}{{`{{getv $onode_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
neighbor port {{.Values.networking.bgp.ipv4.mesh.port.neighbor}};
}{{`{{end}}`}}{{`{{end}}`}}{{`{{end}}`}}
{{`{{$onode_ip_key := printf "/host/%s/ip_addr_v4" .}}{{if exists $onode_ip_key}}{{$onode_ip := getv $onode_ip_key}}`}}
{{`{{$nums := split $onode_ip "."}}{{$id := join $nums "_"}}`}}
{{`# For peer {{$onode_ip_key}}`}}
{{`{{if eq $onode_ip ($node_ip) }}# Skipping ourselves ({{$node_ip}})`}}
{{`{{else if ne "" $onode_ip}}protocol bgp Mesh_{{$id}} from bgp_template {`}}
{{` neighbor {{$onode_ip}} as {{if exists $onode_as_key}}{{getv $onode_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};`}}
neighbor port {{.Values.networking.bgp.ipv4.port.neighbor}};
{{`}{{end}}{{end}}{{end}}`}}
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
@ -80,26 +81,27 @@ template bgp bgp_template {
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v4"}}`}}
{{`{{range gets "/global/peer_v4/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.mesh.port.neighbor}};
{{`{{range gets "/global/peer_v4/*"}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`protocol bgp Global_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv4.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
{{`{{else}}# No global peers configured.{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v4" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv4.mesh.port.neighbor}};
{{`{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`protocol bgp Node_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv4.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}
{{`{{else}}# No node-specific peers configured.{{end}}`}}
{{`{{end}}{{/* End of IPv4 enable check */}}`}}

View File

@ -1,110 +0,0 @@
# Generated by confd
include "bird6_aggr.cfg";
include "custom_filters6.cfg";
include "bird6_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
{{`{{$node_ip6_key := printf "/host/%s/ip_addr_v6" (getenv "NODENAME")}}`}}{{`{{$node_ip6 := getv $node_ip6_key}}`}}
router id {{`{{$node_ip}}`}}; # Use IPv4 address since router id is 4 octets, even in MP-BGP
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export all; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{if eq "" ($node_ip6)}}`}}# IPv6 disabled on this node.
{{`{{else}}`}}{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# ensure we only listen on a specific IP address and port
listen bgp address {{`{{$node_ip6}}`}} port {{.Values.networking.bgp.ipv6.mesh.port.listen}};
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip6}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Node-to-node mesh -------------
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
{{`{{$onode_ip_key := printf "/host/%s/ip_addr_v6" .}}`}}{{`{{if exists $onode_ip_key}}`}}{{`{{$onode_ip := getv $onode_ip_key}}`}}
{{`{{$nums := split $onode_ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{$onode_ip_key}}`}}
{{`{{if eq $onode_ip ($node_ip6) }}`}}# Skipping ourselves ({{`{{$node_ip6}}`}})
{{`{{else if eq "" $onode_ip}}`}}# No IPv6 address configured for this node
{{`{{else}}`}}protocol bgp Mesh_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$onode_ip}}`}} as {{`{{if exists $onode_as_key}}`}}{{`{{getv $onode_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
neighbor port {{.Values.networking.bgp.ipv6.mesh.port.neighbor}};
}{{`{{end}}`}}{{`{{end}}`}}{{`{{end}}`}}
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v6"}}`}}
{{`{{range gets "/global/peer_v6/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v6" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}
{{`{{end}}`}}

View File

@ -1,93 +0,0 @@
# Generated by confd
include "bird6_aggr.cfg";
include "custom_filters6.cfg";
include "bird6_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}`}}{{`{{$node_ip := getv $node_ip_key}}`}}
{{`{{$node_ip6_key := printf "/host/%s/ip_addr_v6" (getenv "NODENAME")}}`}}{{`{{$node_ip6 := getv $node_ip6_key}}`}}
router id {{`{{$node_ip}}`}}; # Use IPv4 address since router id is 4 octets, even in MP-BGP
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}`}}{{`{{if exists $node_logging_key}}`}}{{`{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else if exists "/global/loglevel"}}`}}{{`{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}}`}} debug all;{{`{{else if ne $logging "none"}}`}} debug { states };{{`{{end}}`}}
{{`{{else}}`}} debug { states };{{`{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export all; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{`{{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{`{{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{if eq "" ($node_ip6)}}`}}# IPv6 disabled on this node.
{{`{{else}}`}}{{`{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# ensure we only listen on a specific IP address and port
listen bgp address {{`{{$node_ip6}}`}} port {{.Values.networking.bgp.ipv6.no_mesh.port.listen}};
# Template for all BGP clients
template bgp bgp_template {
{{`{{template "LOGGING"}}`}}
description "Connection to BGP peer";
local as {{`{{if exists $node_as_key}}`}}{{`{{getv $node_as_key}}`}}{{`{{else}}`}}{{`{{getv "/global/as_num"}}`}}{{`{{end}}`}};
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
source address {{`{{$node_ip6}}`}}; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v6"}}`}}
{{`{{range gets "/global/peer_v6/*"}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Global_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No global peers configured.{{`{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v6" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}`}}{{`{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}`}}{{`{{$id := join $nums "_"}}`}}
# For peer {{`{{.Key}}`}}
protocol bgp Node_{{`{{$id}}`}} from bgp_template {
neighbor {{`{{$data.ip}}`}} as {{`{{$data.as_num}}`}};
neighbor port {{.Values.networking.bgp.ipv6.no_mesh.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}`}}# No node-specific peers configured.{{`{{end}}`}}
{{`{{end}}`}}

View File

@ -0,0 +1,110 @@
# Generated by confd
include "bird6_aggr.cfg";
include "bird6_ipam.cfg";
{{`{{$node_ip_key := printf "/host/%s/ip_addr_v4" (getenv "NODENAME")}}{{$node_ip := getv $node_ip_key}}`}}
{{`{{$node_ip6_key := printf "/host/%s/ip_addr_v6" (getenv "NODENAME")}}{{$node_ip6 := getv $node_ip6_key}}`}}
{{`{{$router_id := getenv "CALICO_ROUTER_ID" ""}}`}}
{{`router id {{if ne "" ($router_id)}}{{$router_id}}{{else}}{{$node_ip}}{{end}}; # Use IPv4 address since router id is 4 octets, even in MP-BGP`}}
{{`{{define "LOGGING"}}`}}
{{`{{$node_logging_key := printf "/host/%s/loglevel" (getenv "NODENAME")}}{{if exists $node_logging_key}}{{$logging := getv $node_logging_key}}`}}
{{`{{if eq $logging "debug"}} debug all;{{else if ne $logging "none"}} debug { states };{{end}}`}}
{{`{{else if exists "/global/loglevel"}}{{$logging := getv "/global/loglevel"}}`}}
{{`{{if eq $logging "debug"}} debug all;{{else if ne $logging "none"}} debug { states };{{end}}`}}
{{`{{else}} debug { states };{{end}}`}}
{{`{{end}}`}}
# Configure synchronization between routing tables and kernel.
protocol kernel {
learn; # Learn all alien routes from the kernel
persist; # Don't remove routes on bird shutdown
scan time 2; # Scan kernel routing table every 2 seconds
import all;
export all; # Default is export none
graceful restart; # Turn on graceful restart to reduce potential flaps in
# routes when reloading BIRD configuration. With a full
# automatic mesh, there is no way to prevent BGP from
# flapping since multiple nodes update their BGP
# configuration at the same time, GR is not guaranteed to
# work correctly in this scenario.
}
# Watch interface up/down events.
protocol device {
{{` {{template "LOGGING"}}`}}
scan time 2; # Scan interfaces every 2 seconds
}
protocol direct {
{{` {{template "LOGGING"}}`}}
interface -"cali*", "*"; # Exclude cali* but include everything else.
}
{{`{{if eq "" ($node_ip6)}}# IPv6 disabled on this node.`}}
{{`{{else}}{{$node_as_key := printf "/host/%s/as_num" (getenv "NODENAME")}}`}}
# ensure we only listen on a specific IP address and port
listen bgp address {{`{{$node_ip6}}`}} port {{.Values.networking.bgp.ipv6.port.listen}};
# Template for all BGP clients
template bgp bgp_template {
{{` {{template "LOGGING"}}`}}
description "Connection to BGP peer";
{{` local as {{if exists $node_as_key}}{{getv $node_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};`}}
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
{{` source address {{$node_ip6}}; # The local address we use for the TCP connection`}}
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Node-to-node mesh -------------
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
{{`{{$onode_ip_key := printf "/host/%s/ip_addr_v6" .}}{{if exists $onode_ip_key}}{{$onode_ip := getv $onode_ip_key}}`}}
{{`{{$nums := split $onode_ip ":"}}{{$id := join $nums "_"}}`}}
{{`# For peer {{$onode_ip_key}}`}}
{{`{{if eq $onode_ip ($node_ip6) }}# Skipping ourselves ({{$node_ip6}})`}}
{{`{{else if eq "" $onode_ip}}# No IPv6 address configured for this node`}}
{{`{{else}}protocol bgp Mesh_{{$id}} from bgp_template {`}}
{{` neighbor {{$onode_ip}} as {{if exists $onode_as_key}}{{getv $onode_as_key}}{{else}}{{getv "/global/as_num"}}{{end}};`}}
neighbor port {{.Values.networking.bgp.ipv6.port.neighbor}};
{{`}{{end}}{{end}}{{end}}`}}
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
# ------------- Global peers -------------
{{`{{if ls "/global/peer_v6"}}`}}
{{`{{range gets "/global/peer_v6/*"}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`protocol bgp Global_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv6.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}# No global peers configured.{{end}}`}}
# ------------- Node-specific peers -------------
{{`{{$node_peers_key := printf "/host/%s/peer_v6" (getenv "NODENAME")}}`}}
{{`{{if ls $node_peers_key}}`}}
{{`{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`protocol bgp Node_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv6.port.neighbor}};
}
{{`{{end}}`}}
{{`{{else}}# No node-specific peers configured.{{end}}`}}
{{`{{end}}`}}

View File

@ -1,9 +1,18 @@
# Generated by confd
function osh_filters ()
{
# support any addresses matching our secondary announcements
{{- range .Values.networking.bgp.ipv6.additional_cidrs }}
if ( net ~ {{ . }} ) then { accept; }
{{- end }}
}
filter calico_pools {
calico_aggr();
custom_filters();
{{`{{range ls "/pool"}}`}}{{`{{$data := json (getv (printf "/pool/%s" .))}}`}}
if ( net ~ {{`{{$data.cidr}}`}} ) then {
osh_filters();
{{`{{range ls "/pool"}}{{$data := json (getv (printf "/pool/%s" .))}}`}}
{{` if ( net ~ {{$data.cidr}} ) then {`}}
accept;
}
{{`{{end}}`}}

View File

@ -1,22 +0,0 @@
# Generated by confd
# ------------- Static black hole addresses -------------
{{`{{if ls "/"}}`}}
protocol static {
{{`{{range ls "/"}}`}}
{{`{{$parts := split . "-"}}`}}
{{`{{$cidr := join $parts "/"}}`}}
route {{`{{$cidr}}`}} blackhole;
{{`{{end}}`}}
}
{{`{{else}}`}}# No static routes configured.{{`{{end}}`}}
# Aggregation of routes on this host; export the block, nothing beneath it.
function calico_aggr ()
{
{{`{{range ls "/"}}`}}
{{`{{$parts := split . "-"}}`}}
{{`{{$cidr := join $parts "/"}}`}}
if ( net = {{`{{$cidr}}`}} ) then { accept; }
if ( net ~ {{`{{$cidr}}`}} ) then { reject; }
{{`{{end}}`}}
}

View File

@ -1,32 +1,44 @@
# Generated by confd
function osh_filters ()
{
# support any addresses matching our secondary announcements
{{- range .Values.networking.bgp.ipv4.additional_cidrs }}
if ( net ~ {{ . }} ) then { accept; }
{{- end }}
}
filter calico_pools {
calico_aggr();
custom_filters();
{{`{{range ls "/v1/ipam/v4/pool"}}`}}{{`{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}`}}
if ( net ~ {{`{{$data.cidr}}`}} ) then {
osh_filters();
{{`{{range ls "/v1/ipam/v4/pool"}}{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}`}}
{{` if ( net ~ {{$data.cidr}} ) then {`}}
accept;
}
{{`{{end}}`}}
reject;
}
{{`{{$network_key := printf "/bgp/v1/host/%s/network_v4" (getenv "NODENAME")}}`}}{{`{{$network := getv $network_key}}`}}
{{`{{$network_key := printf "/bgp/v1/host/%s/network_v4" (getenv "NODENAME")}}{{if exists $network_key}}{{$network := getv $network_key}}`}}
filter calico_ipip {
{{`{{range ls "/v1/ipam/v4/pool"}}`}}{{`{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}`}}
if ( net ~ {{`{{$data.cidr}}`}} ) then {
{{`{{if $data.ipip_mode}}`}}{{`{{if eq $data.ipip_mode "cross-subnet"}}`}}
if ( from ~ {{`{{$network}}`}} ) then
krt_tunnel = ""; {{`{{/* Destination in ipPool, mode is cross sub-net, route from-host on subnet, do not use IPIP */}}`}}
{{`{{range ls "/v1/ipam/v4/pool"}}{{$data := json (getv (printf "/v1/ipam/v4/pool/%s" .))}}`}}
{{` if ( net ~ {{$data.cidr}} ) then {`}}
{{`{{if $data.ipip_mode}}{{if eq $data.ipip_mode "cross-subnet"}}`}}
{{` if defined(bgp_next_hop) && ( bgp_next_hop ~ {{$network}} ) then`}}
{{` krt_tunnel = ""; {{/* Destination in ipPool, mode is cross sub-net, route from-host on subnet, do not use IPIP */}}`}}
else
krt_tunnel = "{{`{{$data.ipip}}`}}"; {{`{{/* Destination in ipPool, mode is cross sub-net, route from-host off subnet, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
{{` krt_tunnel = "{{$data.ipip}}"; {{/* Destination in ipPool, mode is cross sub-net, route from-host off subnet, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
accept;
} {{`{{else}}`}}
krt_tunnel = "{{`{{$data.ipip}}`}}"; {{`{{/* Destination in ipPool, mode not cross sub-net, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
{{` } {{else}}`}}
{{` krt_tunnel = "{{$data.ipip}}"; {{/* Destination in ipPool, mode not cross sub-net, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
accept;
} {{`{{end}}`}} {{`{{else}}`}}
krt_tunnel = "{{`{{$data.ipip}}`}}"; {{`{{/* Destination in ipPool, mode field is not present, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
{{` } {{end}} {{else}}`}}
{{` krt_tunnel = "{{$data.ipip}}"; {{/* Destination in ipPool, mode field is not present, set the tunnel (if IPIP not enabled, value will be "") */}}`}}
accept;
} {{`{{end}}`}}
{{` } {{end}}`}}
{{`{{end}}`}}
accept; {{`{{/* Destination is not in any ipPool, accept */}}`}}
{{` accept; {{/* Destination is not in any ipPool, accept */}}`}}
}
{{`{{else}}`}}
filter calico_ipip { accept; }
{{`{{end}}{{/* End of 'exists $network_key' */}}`}}

View File

@ -1,13 +0,0 @@
# Generated by confd
function custom_filters ()
{
{{`{{range ls "/v4"}}`}}{{`{{$data := getv (printf "/v4/%s" .)}}`}}
{{`{{ $data }}`}}
{{`{{end}}`}}
# support any addresses matching our secondary announcements
{{ range .Values.networking.bgp.ipv4.additional_cidrs }}
if ( net ~ {{ . }} ) then { accept; }
{{ end }}
}

View File

@ -1,13 +0,0 @@
# Generated by confd
function custom_filters ()
{
{{`{{range ls "/v6"}}`}}{{`{{$data := getv (printf "/v6/%s" .)}}`}}
{{`{{ $data }}`}}
{{`{{end}}`}}
# support any addresses matching our secondary announcements
{{ range .Values.networking.bgp.ipv6.additional_cidrs }}
if ( net ~ {{ . }} ) then { accept; }
{{ end }}
}

View File

@ -25,23 +25,12 @@ metadata:
data:
# we overlay templates found natively in the calico-node container
# so that we may override bgp configuration
bird6.cfg.mesh.template: |
{{ tuple "bird/_bird6.cfg.mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird6.cfg.no-mesh.template: |
{{ tuple "bird/_bird6.cfg.no-mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird6_ipam.cfg.template: |
{{ tuple "bird/_bird6_ipam.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird_aggr.cfg.template: |
{{ tuple "bird/_bird_aggr.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird.cfg.mesh.template: |
{{ tuple "bird/_bird.cfg.mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird.cfg.no-mesh.template: |
{{ tuple "bird/_bird.cfg.no-mesh.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird.cfg.template: |
{{ tuple "bird/_bird.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird_ipam.cfg.template: |
{{ tuple "bird/_bird_ipam.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
custom_filters6.cfg.template: |
{{ tuple "bird/_custom_filters6.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
custom_filters.cfg.template: |
{{ tuple "bird/_custom_filters.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird6.cfg.template: |
{{ tuple "bird/_bird6.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
bird6_ipam.cfg.template: |
{{ tuple "bird/_bird6_ipam.cfg.template.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
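A quick way to confirm that these overlays land in the running calico-node container is to list the confd template directory; a minimal sketch, where the pod and container names are placeholders and the mount path matches the calico-node daemonset:

kubectl exec <calico-node-pod> -c calico-node -- ls /etc/calico/confd/templates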

View File

@ -16,19 +16,6 @@ limitations under the License.
{{- if .Values.manifests.configmap_etc }}
{{- $envAll := . }}
{{- if empty .Values.conf.cni_network_config.mtu -}}
{{/*
#NOTE(portdirect): to err on the side of caution we subtract 20 from the physical
# MTU to account for IPIP overhead unless explicitly turned off.
*/}}
{{- if eq .Values.conf.node.CALICO_IPV4POOL_IPIP "off" -}}
{{- $_ := set .Values.conf.cni_network_config "mtu" .Values.networking.mtu -}}
{{- else -}}
{{- $_ := set .Values.conf.cni_network_config "mtu" (sub .Values.networking.mtu 20) -}}
{{- end -}}
{{- end -}}
---
kind: ConfigMap
apiVersion: v1

View File

@ -19,6 +19,7 @@ limitations under the License.
{{- $serviceAccountName := "calico-etcd"}}
{{ tuple $envAll "calico-etcd" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
@ -49,19 +50,20 @@ spec:
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: {{ $serviceAccountName }}
tolerations:
# This taint is set by all kubelets running `--cloud-provider=external`
# so we should tolerate it to schedule the Calico pods
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
# Allow this pod to run on the master.
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
# Only run this pod on the master.
nodeSelector:
node-role.kubernetes.io/master: ""
@ -78,14 +80,14 @@ spec:
fieldRef:
fieldPath: status.podIP
command:
- /usr/local/bin/etcd
- /usr/local/bin/etcd
args:
- --name=calico
- --data-dir=/var/etcd/calico-data
- --advertise-client-urls={{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
- --listen-client-urls={{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}://0.0.0.0:{{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --listen-peer-urls={{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}://0.0.0.0:{{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --auto-compaction-retention=1
- --name=calico
- --data-dir=/var/etcd/calico-data
- --advertise-client-urls={{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" }}
- --listen-client-urls={{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}://0.0.0.0:{{ tuple "etcd" "internal" "client" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --listen-peer-urls={{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.keystone_endpoint_scheme_lookup" }}://0.0.0.0:{{ tuple "etcd" "internal" "peer" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- --auto-compaction-retention=1
volumeMounts:
- name: var-etcd
mountPath: /var/etcd

View File

@ -17,34 +17,42 @@ limitations under the License.
{{- if .Values.manifests.daemonset_calico_node }}
{{- $envAll := . }}
{{/* Adjust MTU iff we have tunnel overhead; 20 suffices for an IPv4 IPIP header */}}
{{- if ne .Values.conf.node.CALICO_IPV4POOL_IPIP "Never" -}}
{{- $_ := set .Values.networking "mtu" (sub .Values.networking.mtu 20) -}}
# Adjusted MTU to {{ .Values.networking.mtu }}
{{ end -}}
{{/* Some values need to be specified in multiple places; set appropriately */}}
{{- if empty .Values.conf.node.FELIX_IPINIPMTU -}}
{{- $_ := set .Values.conf.node "FELIX_IPINIPMTU" .Values.networking.mtu -}}
{{- end -}}
{{- if empty .Values.conf.node.CNI_MTU -}}
{{- $_ := set .Values.conf.node "CNI_MTU" .Values.conf.node.FELIX_IPINIPMTU -}}
{{- end -}}
{{- if empty .Values.conf.node.CALICO_IPV4POOL_CIDR -}}
{{- $_ := set .Values.conf.node "CALICO_IPV4POOL_CIDR" .Values.networking.podSubnet -}}
{{- end -}}
{{- if empty .Values.conf.node.FELIX_IPINIPMTU -}}
{{/*
#NOTE(portdirect): to err on the side of caution we subtract 20 from the physical
# MTU to account for IPIP overhead unless explicitly turned off.
*/}}
{{- if eq .Values.conf.node.CALICO_IPV4POOL_IPIP "off" -}}
{{- $_ := set .Values.conf.node "FELIX_IPINIPMTU" .Values.networking.mtu -}}
{{- else -}}
{{- $_ := set .Values.conf.node "FELIX_IPINIPMTU" (sub .Values.networking.mtu 20) -}}
{{- end -}}
{{- end -}}
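The MTU handling above, redone in plain shell as an illustrative check only (1500 is simply the chart's default physical MTU):

PHYS_MTU=1500                   # .Values.networking.mtu
POD_MTU=$((PHYS_MTU - 20))      # 20-byte IPv4 IPIP header
echo "FELIX_IPINIPMTU=CNI_MTU=${POD_MTU}"   # 1480 whenever CALICO_IPV4POOL_IPIP is not "Never"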
{{- $serviceAccountName := printf "%s-%s" .Release.Name "calico-cni-plugin"}}
{{- $serviceAccountName := "calico-node"}}
{{ tuple $envAll "calico_node" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $serviceAccountName }}
name: calico-node
subjects:
- kind: ServiceAccount
name: {{ $serviceAccountName }}
@ -61,6 +69,9 @@ rules:
- nodes
verbs:
- get
- apiGroups: ["batch" ]
resources: ["jobs"]
verbs: ["get" ]
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
@ -118,6 +129,7 @@ spec:
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
{{ tuple $envAll "calico_node" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
{{ if .Values.manifests.daemonset_calico_node_calicoctl }}
@ -132,7 +144,6 @@ spec:
configMapKeyRef:
name: calico-etc
key: etcd_endpoints
{{ if .Values.endpoints.etcd.auth.client.tls.ca}}
- name: ETCD_CA_CERT_FILE
value: {{ .Values.endpoints.etcd.auth.client.path.ca }}
@ -181,6 +192,7 @@ spec:
subPath: tls.key
readOnly: true
{{ end }}
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
@ -239,6 +251,15 @@ spec:
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
# Only for Calico v3
readinessProbe:
exec:
command:
- /bin/calico-node
- -bird-ready
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
@ -249,37 +270,21 @@ spec:
# bird template replacements
# bird cfg
- mountPath: /etc/calico/confd/templates/bird.cfg.mesh.template
- mountPath: /etc/calico/confd/templates/bird.cfg.template
name: calico-bird
subPath: bird.cfg.mesh.template
- mountPath: /etc/calico/confd/templates/bird.cfg.no-mesh.template
name: calico-bird
subPath: bird.cfg.no-mesh.template
subPath: bird.cfg.template
# bird ipam
- mountPath: /etc/calico/confd/templates/bird_ipam.cfg.template
name: calico-bird
subPath: bird_ipam.cfg.template
# bird6 cfg
- mountPath: /etc/calico/confd/templates/bird6.cfg.mesh.template
- mountPath: /etc/calico/confd/templates/bird6.cfg.template
name: calico-bird
subPath: bird6.cfg.mesh.template
- mountPath: /etc/calico/confd/templates/bird6.cfg.no-mesh.template
name: calico-bird
subPath: bird6.cfg.no-mesh.template
subPath: bird6.cfg.template
# bird6 ipam
- mountPath: /etc/calico/confd/templates/bird6_ipam.cfg.template
name: calico-bird
subPath: bird6_ipam.cfg.template
# filters...
- mountPath: /etc/calico/confd/templates/bird_aggr.cfg.template
name: calico-bird
subPath: bird_aggr.cfg.template
- mountPath: /etc/calico/confd/templates/custom_filters6.cfg.template
name: calico-bird
subPath: custom_filters6.cfg.template
- mountPath: /etc/calico/confd/templates/custom_filters.cfg.template
name: calico-bird
subPath: custom_filters.cfg.template
# etcd secrets
- mountPath: /var/lib/calico
name: var-lib-calico
@ -300,15 +305,14 @@ spec:
# and CNI network config file on each node.
- name: install-cni
{{ tuple $envAll "calico_cni" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.calico_cni | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
#
# NOTE: Calico v2 needs to end in .conf; Calico v3 is
# NOTE: Calico v3 needs to end in .conflist; Calico v2 is
# different!
- name: CNI_CONF_NAME
value: "10-calico.conf"
value: "10-calico.conflist"
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
@ -321,6 +325,7 @@ spec:
configMapKeyRef:
name: calico-etc
key: cni_network_config
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
@ -362,4 +367,5 @@ spec:
- name: calico-etcd-secrets
secret:
secretName: calico-etcd-secrets
{{- end }}
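The readiness probe added for Calico v3 can also be run by hand when debugging a node; a minimal sketch, with a placeholder pod name and whatever namespace the chart was deployed into:

kubectl exec <calico-node-pod> -- /bin/calico-node -bird-ready -felix-ready && echo ready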

View File

@ -1,4 +1,4 @@
{{/*
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
@ -17,7 +17,7 @@ limitations under the License.
{{- if .Values.manifests.deployment_calico_kube_controllers }}
{{- $envAll := . }}
{{- $serviceAccountName := printf "%s-%s" .Release.Name "calico-kube-controllers"}}
{{- $serviceAccountName := "calico-kube-controllers"}}
{{ tuple $envAll "calico_kube_controllers" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: ClusterRoleBinding
@ -58,6 +58,7 @@ rules:
- watch
- list
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
@ -98,13 +99,14 @@ spec:
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: {{ $serviceAccountName }}
initContainers:
{{ tuple $envAll "calico_kube_controllers" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
@ -152,6 +154,12 @@ spec:
subPath: tls.key
readOnly: true
# Calico v3 only
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
volumes:
- name: calico-etcd-secrets
secret:

View File

@ -0,0 +1,2 @@
H4sIAJLrq1sCA+3IOwqFMABE0SwlS4jGxPVYvFIQP4W7N1ja+0A4p7nD/OZlP8O7UlOH4W7z7L27
nEs/1lL62v4x5S7EFP7g2PZpjTEAAAAAAAAAAADAh1zOUd8NACgAAA==

View File

@ -14,13 +14,15 @@
images:
tags:
calico_etcd: quay.io/coreos/etcd:v3.1.14
calico_node: quay.io/calico/node:v2.6.9
calico_cni: quay.io/calico/cni:v1.11.5
calico_ctl: quay.io/calico/ctl:v1.6.4
calico_settings: quay.io/calico/ctl:v1.6.4
# These are minimum versions, older images will very likely not
# work
calico_etcd: quay.io/coreos/etcd:v3.3.9
calico_node: quay.io/calico/node:v3.2.1
calico_cni: quay.io/calico/cni:v3.2.1
calico_ctl: calico/ctl:release-v3.2-amd64
calico_settings: calico/ctl:release-v3.2-amd64
# NOTE: plural key, singular value
calico_kube_controllers: quay.io/calico/kube-policy-controller:v0.7.0
calico_kube_controllers: quay.io/calico/kube-controllers:v3.2.1
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
image_repo_sync: docker.io/docker:17.07.0
pull_policy: IfNotPresent
@ -179,9 +181,9 @@ monitoring:
networking:
podSubnet: 192.168.0.0/16
# NOTE(portdirect): this should be the physical MTU, the appropriate MTU
# that calico should use will be calculated.
# Physical MTU; if IPIP is enabled, the chart will adjust it downward
mtu: 1500
settings:
mesh: "on"
# technically this could be a list, today we only support
@ -190,69 +192,60 @@ networking:
ippool:
ipip:
enabled: "true"
# lowercase value
mode: "always"
# Titlecase
mode: "Always"
nat_outgoing: "true"
disabled: "false"
bgp:
# our asnumber for bgp peering
asnumber: 64512
ipv4:
# https://docs.projectcalico.org/v2.0/reference/calicoctl/resources/bgppeer
# https://docs.projectcalico.org/v3.2/reference/calicoctl/resources/bgppeer
#
# this is a list of peer objects that will be passed directly to
# calicoctl - for global peers, omit the node attribute
#
# apiVersion: v1
# kind: bgpPeer
# metadata:
# peerIP: 10.1.10.39
# scope: node
# node: some.name
# spec:
# asNumber: 64512
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
# name: some.name
# spec:
# node: rack1-host1
# peerIP: 10.1.10.39
# asNumber: 64512
peers: []
# this is a list of additional IPv4 CIDRs; if we discover IPs within
# them on a host, we will announce those addresses in addition to
# traditional pod workloads
additional_cidrs: []
mesh:
port:
neighbor: 179
listen: 179
no_mesh:
port:
neighbor: 179
listen: 179
port:
neighbor: 179
listen: 179
ipv6:
# https://docs.projectcalico.org/v2.0/reference/calicoctl/resources/bgppeer
# https://docs.projectcalico.org/v3.2/reference/calicoctl/resources/bgppeer
#
# this is a list of peer objects that will be passed directly to
# calicoctl - for global peers, omit the node attribute
#
# apiVersion: v1
# kind: bgpPeer
# metadata:
# peerIP: 2600:1:2:3::abcd
# scope: node
# node: rack1-host1
# spec:
# asNumber: 64512
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
# name: some.name
# spec:
# node: rack1-host1
# peerIP: 2600:1:2:3::abcd
# asNumber: 64512
peers: []
# this is a list of additional IPv6 CIDRs; if we discover IPs within
# them on a host, we will announce those addresses in addition to
# traditional pod workloads
additional_cidrs: []
mesh:
port:
neighbor: 179
listen: 179
no_mesh:
port:
neighbor: 179
listen: 179
port:
neighbor: 179
listen: 179
conf:
etcd:
@ -260,22 +253,34 @@ conf:
ca: null
key: null
certificate: null
# NOTE: the syntax has subtly changed since Calico v2. For Armada *all*
# of this needs to be specified. We're using YAML here, which we can't
# robustly convert to JSON (which the node pod requires), so we may
# revisit this and embed a JSON string that gets edited.
cni_network_config:
# https://docs.projectcalico.org/v2.0/reference/cni-plugin/configuration
# https://docs.projectcalico.org/v3.2/reference/cni-plugin/configuration
#
# other than the etcd_* keys you likely want to leave this as-is
name: k8s-pod-network
cniVersion: 0.1.0
type: calico
etcd_endpoints: __ETCD_ENDPOINTS__
log_level: info
mtu: null
ipam:
type: calico-ipam
policy:
type: k8s
k8s_api_root: https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__
k8s_auth_token: __SERVICEACCOUNT_TOKEN__
kubernetes:
kubeconfig: "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
cniVersion: 0.3.0
plugins:
- type: calico
log_level: info
etcd_endpoints: __ETCD_ENDPOINTS__
etcd_key_file: __ETCD_KEY_FILE__
etcd_cert_file: __ETCD_CERT_FILE__
etcd_ca_cert_file: __ETCD_CA_CERT_FILE__
ipam:
type: calico-ipam
policy:
type: k8s
kubernetes:
kubeconfig: __KUBECONFIG_FILEPATH__
- type: portmap
snat: true
capabilities:
portMappings: true
controllers:
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
@ -286,11 +291,14 @@ conf:
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
CONFIGURE_ETC_HOSTS: "true"
node:
# for specific details see
# https://docs.projectcalico.org/v3.2/reference/node/configuration
name: k8s-pod-network
# Cluster type to identify the deployment type
CLUSTER_TYPE:
- kubeadm
- bgp
# NOTE: v2 used a list; v3 uses a comma-separated string
CLUSTER_TYPE: "k8s,bgp"
# Describes which BGP networking backend to use: gobgp, bird, or none. Default is bird.
# NOTE(alanmeadows) today this chart only supports applying the bgp customizations to
# bird templates - in the future we may support gobgp as well
@ -308,8 +316,8 @@ conf:
# Configure the IP Pool from which Pod IPs will be chosen.
CALICO_IPV4POOL_CIDR: null
# Change this to 'off' in environments with direct L2 communication
# lowercase
CALICO_IPV4POOL_IPIP: "always"
# Titlecase
CALICO_IPV4POOL_IPIP: "Always"
# Disable IPv6 on Kubernetes.
FELIX_IPV6SUPPORT: "false"
# Set MTU for tunnel device used if ipip is enabled