[Calico] Update to v3.3.2

Calico v3.3.2 update with:

 * update container images
 * update configuration
   * expose ipPool blockSize
 * update bird templates
 * update rbac

Change-Id: I72d218cb55a70b72c4d7d940109e718fe44a094f
Chris Wedgwood 2018-10-28 19:47:04 +00:00
parent 13a58c5530
commit 49604fbd17
5 changed files with 73 additions and 16 deletions


@@ -40,7 +40,7 @@ spec:
logSeverityScreen: {{ .Values.conf.node.FELIX_LOGSEVERITYSCREEN }}
EOF
# ipPool - https://docs.projectcalico.org/v3.2/reference/calicoctl/resources/ippool
# ipPool - https://docs.projectcalico.org/v3.3/reference/calicoctl/resources/ippool
$CTL apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
@@ -48,6 +48,9 @@ metadata:
name: default-ipv4-ippool
spec:
cidr: {{ .Values.conf.node.CALICO_IPV4POOL_CIDR }}
{{- if .Values.conf.node.CALICO_IPV4POOL_BLOCKSIZE }}
blockSize: {{ .Values.conf.node.CALICO_IPV4POOL_BLOCKSIZE }}
{{- end }}
ipipMode: {{ .Values.networking.settings.ippool.ipip.mode }}
natOutgoing: {{ .Values.networking.settings.ippool.nat_outgoing }}
disabled: {{ .Values.networking.settings.ippool.disabled }}
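
For reference, with the new key in place the heredoc above applies an IPPool roughly like the sketch below; the CIDR and the boolean settings are illustrative placeholders (CALICO_IPV4POOL_CIDR defaults to null and must be supplied), not values taken from this change:

apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  cidr: 10.25.0.0/16   # placeholder; rendered from conf.node.CALICO_IPV4POOL_CIDR
  blockSize: 26        # newly exposed; 26 is the Calico default (64 addresses per block)
  ipipMode: Always
  natOutgoing: true
  disabled: false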


@@ -55,14 +55,18 @@ template bgp bgp_template {
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
{{` source address {{$node_ip}}; # The local address we use for the TCP connection`}}
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Node-to-node mesh -------------
{{`{{- $node_cid_key := printf "/host/%s/rr_cluster_id" (getenv "NODENAME")}}`}}
{{`{{- $node_cluster_id := getv $node_cid_key}}`}}
{{`{{- if ne "" ($node_cluster_id)}}`}}
{{`# This node is configured as a route reflector with cluster ID {{$node_cluster_id}};`}}
# ignore node-to-node mesh setting.
{{`{{- else}}`}}
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
@@ -77,6 +81,7 @@ template bgp bgp_template {
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
{{`{{- end}}`}}
# ------------- Global peers -------------
@@ -84,10 +89,18 @@ template bgp bgp_template {
{{`{{range gets "/global/peer_v4/*"}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`{{- if eq $data.ip ($node_ip) }}`}}
{{`# Skipping ourselves ({{$node_ip}})`}}
{{`{{- else}}`}}
{{`protocol bgp Global_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv4.port.neighbor}};
{{`{{- if and (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}`}}
rr client;
{{` rr cluster id {{$node_cluster_id}};`}}
{{`{{- end}}`}}
}
{{`{{- end}}`}}
{{`{{end}}`}}
{{`{{else}}# No global peers configured.{{end}}`}}
@@ -98,10 +111,18 @@ template bgp bgp_template {
{{`{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip "."}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`{{- if eq $data.ip ($node_ip) }}`}}
{{`# Skipping ourselves ({{$node_ip}})`}}
{{`{{- else}}`}}
{{`protocol bgp Node_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv4.port.neighbor}};
{{`{{- if and (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}`}}
rr client;
{{` rr cluster id {{$node_cluster_id}};`}}
{{`{{- end}}`}}
}
{{`{{- end}}`}}
{{`{{end}}`}}
{{`{{else}}# No node-specific peers configured.{{end}}`}}
{{`{{end}}{{/* End of IPv4 enable check */}}`}}
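
To illustrate the new route-reflector branches: when this node carries an rr_cluster_id and a global peer sits outside that cluster, the template above renders a BIRD stanza along these lines (peer address, AS number, and cluster ID are made-up values, not taken from this change):

protocol bgp Global_172_16_0_10 from bgp_template {
  neighbor 172.16.0.10 as 64512;
  neighbor port 179;
  rr client;
  rr cluster id 10.0.0.1;
}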


@@ -57,14 +57,18 @@ template bgp bgp_template {
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export filter calico_pools; # Only want to export routes for workloads.
next hop self; # Disable next hop processing and always advertise our
# local address as nexthop
{{` source address {{$node_ip6}}; # The local address we use for the TCP connection`}}
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
}
# ------------- Node-to-node mesh -------------
{{`{{- $node_cid_key := printf "/host/%s/rr_cluster_id" (getenv "NODENAME")}}`}}
{{`{{- $node_cluster_id := getv $node_cid_key}}`}}
{{`{{- if ne "" ($node_cluster_id)}}`}}
{{`# This node is configured as a route reflector with cluster ID {{$node_cluster_id}};`}}
# ignore node-to-node mesh setting.
{{`{{- else}}`}}
{{`{{if (json (getv "/global/node_mesh")).enabled}}`}}
{{`{{range $host := lsdir "/host"}}`}}
{{`{{$onode_as_key := printf "/host/%s/as_num" .}}`}}
@@ -80,6 +84,7 @@ template bgp bgp_template {
{{`{{else}}`}}
# Node-to-node mesh disabled
{{`{{end}}`}}
{{`{{- end}}`}}
# ------------- Global peers -------------
@@ -87,10 +92,18 @@ template bgp bgp_template {
{{`{{range gets "/global/peer_v6/*"}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`{{- if eq $data.ip ($node_ip6) }}`}}
{{`# Skipping ourselves ({{$node_ip6}})`}}
{{`{{- else}}`}}
{{`protocol bgp Global_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv6.port.neighbor}};
{{`{{- if and (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}`}}
rr client;
{{` rr cluster id {{$node_cluster_id}};`}}
{{`{{- end}}`}}
}
{{`{{- end}}`}}
{{`{{end}}`}}
{{`{{else}}# No global peers configured.{{end}}`}}
@@ -101,10 +114,18 @@ template bgp bgp_template {
{{`{{range gets (printf "%s/*" $node_peers_key)}}{{$data := json .Value}}`}}
{{`{{$nums := split $data.ip ":"}}{{$id := join $nums "_"}}`}}
{{`# For peer {{.Key}}`}}
{{`{{- if eq $data.ip ($node_ip6) }}`}}
{{`# Skipping ourselves ({{$node_ip6}})`}}
{{`{{- else}}`}}
{{`protocol bgp Node_{{$id}} from bgp_template {`}}
{{` neighbor {{$data.ip}} as {{$data.as_num}};`}}
neighbor port {{.Values.networking.bgp.ipv6.port.neighbor}};
{{`{{- if and (ne "" ($node_cluster_id)) (ne $data.rr_cluster_id ($node_cluster_id))}}`}}
rr client;
{{` rr cluster id {{$node_cluster_id}};`}}
{{`{{- end}}`}}
}
{{`{{- end}}`}}
{{`{{end}}`}}
{{`{{else}}# No node-specific peers configured.{{end}}`}}
{{`{{end}}`}}


@@ -67,6 +67,7 @@ rules:
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: ["batch" ]
@@ -265,6 +266,9 @@ spec:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
@@ -343,6 +347,10 @@ spec:
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:


@@ -17,12 +17,12 @@ images:
# These are minimum versions; older images will very likely not
# work
calico_etcd: quay.io/coreos/etcd:v3.3.9
calico_node: quay.io/calico/node:v3.2.4
calico_cni: quay.io/calico/cni:v3.2.4
calico_ctl: calico/ctl:v3.2.4
calico_settings: calico/ctl:v3.2.4
calico_node: quay.io/calico/node:v3.3.2
calico_cni: quay.io/calico/cni:v3.3.2
calico_ctl: calico/ctl:v3.3.2
calico_settings: calico/ctl:v3.3.2
# NOTE: plural key, singular value
calico_kube_controllers: quay.io/calico/kube-controllers:v3.2.4
calico_kube_controllers: quay.io/calico/kube-controllers:v3.3.2
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
image_repo_sync: docker.io/docker:17.07.0
pull_policy: IfNotPresent
@@ -205,7 +205,7 @@ networking:
# our asnumber for bgp peering
asnumber: 64512
ipv4:
# https://docs.projectcalico.org/v3.2/reference/calicoctl/resources/bgppeer
# https://docs.projectcalico.org/v3.3/reference/calicoctl/resources/bgppeer
#
# this is a list of peer objects that will be passed directly to
# calicoctl - for global peers, the scope should be global and
@@ -228,7 +228,7 @@ networking:
neighbor: 179
listen: 179
ipv6:
# https://docs.projectcalico.org/v3.2/reference/calicoctl/resources/bgppeer
# https://docs.projectcalico.org/v3.3/reference/calicoctl/resources/bgppeer
#
# this is a list of peer objects that will be passed directly to
# calicoctl - for global peers, the scope should be global and
@@ -410,7 +410,7 @@ conf:
# we might revisit that and embed a json string that gets
# edited
cni_network_config:
# https://docs.projectcalico.org/v3.2/reference/cni-plugin/configuration
# https://docs.projectcalico.org/v3.3/reference/cni-plugin/configuration
#
# other than the etcd_* keys you likely want to leave this as-is
name: k8s-pod-network
@@ -436,8 +436,10 @@ conf:
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
K8S_API: "https://kubernetes.default:443"
# Choose which controllers to run.
ENABLED_CONTROLLERS: "policy,profile,workloadendpoint,node"
# Choose which controllers to run, see
# https://docs.projectcalico.org/v3.3/reference/kube-controllers/configuration
# for an explanation of each
ENABLED_CONTROLLERS: "policy,namespace,serviceaccount,workloadendpoint,node"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
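
For orientation, the controller list above ends up in the calico-kube-controllers container environment; a rough sketch of how it surfaces is below (the surrounding manifest layout is assumed, not taken from this change):

containers:
  - name: calico-kube-controllers
    image: quay.io/calico/kube-controllers:v3.3.2
    env:
      - name: ENABLED_CONTROLLERS
        value: "policy,namespace,serviceaccount,workloadendpoint,node"
      - name: K8S_API
        value: "https://kubernetes.default:443"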
@@ -445,7 +447,7 @@ conf:
node:
# for specific details see
# https://docs.projectcalico.org/v3.2/reference/node/configuration
# https://docs.projectcalico.org/v3.3/reference/node/configuration
name: k8s-pod-network
# Cluster type to identify the deployment type
# NOTE: v2 had a list ... v3 a comma separated string
@@ -470,6 +472,8 @@ conf:
FELIX_DEFAULTENDPOINTTOHOSTACTION: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
CALICO_IPV4POOL_CIDR: null
# See https://docs.projectcalico.org/v3.3/reference/calicoctl/resources/ippool
CALICO_IPV4POOL_BLOCKSIZE: 26
# Change this to 'off' in environments with direct L2 communication
# Titlecase
CALICO_IPV4POOL_IPIP: "Always"
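
The newly exposed block size can be tuned per deployment by overriding conf.node.CALICO_IPV4POOL_BLOCKSIZE in a values override file; the snippet below is an illustrative example (per the linked ipPool reference, IPv4 block sizes must fall between 20 and 32):

conf:
  node:
    # a /24 gives each node-assigned block 256 addresses instead of the default 64
    CALICO_IPV4POOL_BLOCKSIZE: 24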