Enable default CNI plugins

Currently, only a subset of the default CNI plugins are enabled in
StarlingX.  These plugins are provided by Calico and placed in the
/opt/cni/bin directory.

On CentOS, the full set of CNI plugins that are distributed by the
containernetworking-plugins package are installed under the
default directory for system daemons and utilities at
/usr/libexec/cni

This means that some of the default plugins such as static ipam and
dhcp are not available to be used, as kubelet looks at the
/opt/cni/bin directory by default.

This commit changes the cni-bin volume of the Calico, Multus, and
SRIOV CNI daemonsets to refer to the plugin executable path used by
the containernetworking-plugins package.  As such, it is also
required that the --cni-bin-dir option now be passed to kubelet.

In addition, since the config_controller --force option has been
removed, we can safely remove the calico, multus, and SRIOV puppet
templates.  They have already been replaced by the equivalent
ansible bootstrap templates.

Depends-on: https://review.opendev.org/680409
Change-Id: I5a21ccc7cb06ab8f50df19b513335cd4a16211f5
Partial-Bug: #1840391
Signed-off-by: Steven Webster <steven.webster@windriver.com>
This commit is contained in:
Steven Webster 2019-09-17 10:06:36 -05:00
parent d58a7b2e2f
commit 295410ed3d
6 changed files with 72 additions and 1309 deletions

View File

@ -18,7 +18,8 @@ class platform::kubernetes::params (
$k8s_reserved_cpus = undef,
$k8s_reserved_mem = undef,
$k8s_isol_cpus = undef,
$apiserver_cert_san = []
$apiserver_cert_san = [],
$k8s_cni_bin_dir = '/usr/libexec/cni'
) { }
@ -107,6 +108,7 @@ class platform::kubernetes::kubeadm {
$k8s_reserved_cpus = $::platform::kubernetes::params::k8s_reserved_cpus
$k8s_reserved_mem = $::platform::kubernetes::params::k8s_reserved_mem
$k8s_isol_cpus = $::platform::kubernetes::params::k8s_isol_cpus
$k8s_cni_bin_dir = $::platform::kubernetes::params::k8s_cni_bin_dir
$iptables_file = "net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1"
@ -211,12 +213,75 @@ class platform::kubernetes::master::init
$docker_registry = 'docker.io'
}
if str2bool($::is_initial_config_primary) {
# For initial controller install, configure kubernetes from scratch.
$resolv_conf = '/etc/resolv.conf'
if str2bool($::is_initial_k8s_config) {
# This allows subsequent node installs
# Notes regarding ::is_initial_k8s_config check:
# - Ensures block is only run for new node installs (e.g. controller-1)
# or reinstalls. This part is needed only once;
# - Ansible configuration is independently configuring Kubernetes. A retry
# in configuration by puppet leads to failed manifest application.
# This flag is created by Ansible on controller-0;
# - Ansible replay is not impacted by flag creation.
# If alternative k8s registry requires the authentication,
# kubeadm required images need to be pre-pulled on controller
if $k8s_registry != 'k8s.gcr.io' and $::platform::docker::params::k8s_registry_secret != undef {
File['/etc/kubernetes/kubeadm.yaml']
-> platform::docker::login_registry { 'login k8s registry':
registry_url => $k8s_registry,
registry_secret => $::platform::docker::params::k8s_registry_secret
}
-> exec { 'kubeadm to pre pull images':
command => 'kubeadm config images pull --config /etc/kubernetes/kubeadm.yaml',
logoutput => true,
before => Exec['configure master node']
}
-> exec { 'logout k8s registry':
command => "docker logout ${k8s_registry}",
logoutput => true,
}
}
# Create necessary certificate files
file { '/etc/kubernetes/pki':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/etc/kubernetes/pki/ca.crt':
ensure => file,
content => $ca_crt,
owner => 'root',
group => 'root',
mode => '0644',
}
-> file { '/etc/kubernetes/pki/ca.key':
ensure => file,
content => $ca_key,
owner => 'root',
group => 'root',
mode => '0600',
}
-> file { '/etc/kubernetes/pki/sa.key':
ensure => file,
content => $sa_key,
owner => 'root',
group => 'root',
mode => '0600',
}
-> file { '/etc/kubernetes/pki/sa.pub':
ensure => file,
content => $sa_pub,
owner => 'root',
group => 'root',
mode => '0600',
}
# Configure the master node.
file { '/etc/kubernetes/kubeadm.yaml':
-> file { '/etc/kubernetes/kubeadm.yaml':
ensure => file,
content => template('platform/kubeadm.yaml.erb'),
}
@ -237,58 +302,12 @@ class platform::kubernetes::master::init
# Add a bash profile script to set a k8s env variable
-> file {'bash_profile_k8s':
ensure => file,
ensure => present,
path => '/etc/profile.d/kubeconfig.sh',
mode => '0644',
source => "puppet:///modules/${module_name}/kubeconfig.sh"
}
# Deploy Multus as a Daemonset, and Calico is used as the default network
# (a network interface that every pod will be created with), each network
# attachment is made in addition to this default network.
-> file { '/etc/kubernetes/multus.yaml':
ensure => file,
content => template('platform/multus.yaml.erb'),
}
-> exec {'deploy multus daemonset':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/multus.yaml',
logoutput => true,
}
# Configure calico networking using the Kubernetes API datastore.
-> file { '/etc/kubernetes/calico.yaml':
ensure => file,
content => template('platform/calico.yaml.erb'),
}
-> exec { 'install calico networking':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml',
logoutput => true,
}
# Deploy sriov-cni as a Daemonset
-> file { '/etc/kubernetes/sriov-cni.yaml':
ensure => file,
content => template('platform/sriov-cni.yaml.erb'),
}
-> exec {'deploy sriov-cni daemonset':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriov-cni.yaml',
logoutput => true,
}
# Deploy SRIOV network device plugin as a Daemonset
-> file { '/etc/kubernetes/sriovdp-daemonset.yaml':
ensure => file,
content => template('platform/sriovdp-daemonset.yaml.erb'),
}
-> exec {'deploy sriov device plugin daemonset':
command =>
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/sriovdp-daemonset.yaml',
logoutput => true,
}
# Remove the taint from the master node
-> exec { 'remove taint from master node':
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master- || true", # lint:ignore:140chars
@ -323,137 +342,6 @@ class platform::kubernetes::master::init
-> file { '/etc/platform/.initial_k8s_config_complete':
ensure => present,
}
} else {
if str2bool($::is_initial_k8s_config) {
# This allows subsequent node installs
# Notes regarding ::is_initial_k8s_config check:
# - Ensures block is only run for new node installs (e.g. controller-1)
# or reinstalls. This part is needed only once;
# - Ansible configuration is independently configuring Kubernetes. A retry
# in configuration by puppet leads to failed manifest application.
# This flag is created by Ansible on controller-0;
# - Ansible replay is not impacted by flag creation.
# If alternative k8s registry requires the authentication,
# kubeadm required images need to be pre-pulled on controller
if $k8s_registry != 'k8s.gcr.io' and $::platform::docker::params::k8s_registry_secret != undef {
File['/etc/kubernetes/kubeadm.yaml']
-> platform::docker::login_registry { 'login k8s registry':
registry_url => $k8s_registry,
registry_secret => $::platform::docker::params::k8s_registry_secret
}
-> exec { 'kubeadm to pre pull images':
command => 'kubeadm config images pull --config /etc/kubernetes/kubeadm.yaml',
logoutput => true,
before => Exec['configure master node']
}
-> exec { 'logout k8s registry':
command => "docker logout ${k8s_registry}",
logoutput => true,
}
}
# Create necessary certificate files
file { '/etc/kubernetes/pki':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/etc/kubernetes/pki/ca.crt':
ensure => file,
content => $ca_crt,
owner => 'root',
group => 'root',
mode => '0644',
}
-> file { '/etc/kubernetes/pki/ca.key':
ensure => file,
content => $ca_key,
owner => 'root',
group => 'root',
mode => '0600',
}
-> file { '/etc/kubernetes/pki/sa.key':
ensure => file,
content => $sa_key,
owner => 'root',
group => 'root',
mode => '0600',
}
-> file { '/etc/kubernetes/pki/sa.pub':
ensure => file,
content => $sa_pub,
owner => 'root',
group => 'root',
mode => '0600',
}
# Configure the master node.
-> file { '/etc/kubernetes/kubeadm.yaml':
ensure => file,
content => template('platform/kubeadm.yaml.erb'),
}
-> exec { 'configure master node':
command => 'kubeadm init --config=/etc/kubernetes/kubeadm.yaml',
logoutput => true,
}
# Update ownership/permissions for file created by "kubeadm init".
# We want it readable by sysinv and sysadmin.
-> file { '/etc/kubernetes/admin.conf':
ensure => file,
owner => 'root',
group => $::platform::params::protected_group_name,
mode => '0640',
}
# Add a bash profile script to set a k8s env variable
-> file {'bash_profile_k8s':
ensure => present,
path => '/etc/profile.d/kubeconfig.sh',
mode => '0644',
source => "puppet:///modules/${module_name}/kubeconfig.sh"
}
# Remove the taint from the master node
-> exec { 'remove taint from master node':
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master- || true", # lint:ignore:140chars
logoutput => true,
}
# Add kubelet service override
-> file { '/etc/systemd/system/kubelet.service.d/kube-stx-override.conf':
ensure => file,
content => template('platform/kube-stx-override.conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# set kubelet monitored by pmond
-> file { '/etc/pmon.d/kubelet.conf':
ensure => file,
content => template('platform/kubelet-pmond-conf.erb'),
owner => 'root',
group => 'root',
mode => '0644',
}
# Reload systemd
-> exec { 'perform systemctl daemon reload for kubelet override':
command => 'systemctl daemon-reload',
logoutput => true,
}
# Initial kubernetes config done on node
-> file { '/etc/platform/.initial_k8s_config_complete':
ensure => present,
}
}
}
}

View File

@ -1,825 +0,0 @@
---
# Calico Version v3.6
# Based off:
# https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/
# hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
# Original file located in the source tree as calico.yaml.erb.orig
#
# This is the calico configuration file for systems with less than 50 nodes.
#
# Notes when upversioning calico:
#
# Refer to configuration instructions here:
# https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/
# calico
#
# It is important to test in a multi-controller environment (ie: AIO-DX) that
# the pods can be pinged by their endpoint. ie: A pod running on controller-1
# can be pinged from controller-0, and vice versa.
#
# An additional test (run on controller-0) that queries the calico daemon
# health and status
#
# curl -O -L https://github.com/projectcalico/calicoctl/releases/download/
# v3.6.2/calicoctl
# chmod +x calicoctl
# sudo mv calicoctl /usr/local/bin
# export DATASTORE_TYPE=kubernetes
# sudo calicoctl node status
#
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the Calico backend to use.
calico_backend: "bird"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam",
<%- if @pod_network_ipversion == 4 -%>
"assign_ipv4": "true",
<%- else -%>
"assign_ipv4": "false",
<%- end -%>
<%- if @pod_network_ipversion == 6 -%>
"assign_ipv6": "true"
<%- else -%>
"assign_ipv6": "false"
<%- end -%>
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: "<%= @quay_registry %>/calico/cni:v3.6.2"
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: "<%= @quay_registry %>/calico/cni:v3.6.2"
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
containers:
# Runs node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: "<%= @quay_registry %>/calico/node:v3.6.2"
env:
# Configure inbound failsafe rules
- name: FELIX_FAILSAFEINBOUNDHOSTPORTS
value: "tcp:22, udp:68, tcp:179"
# Configure output failsafe rules
- name: FELIX_FAILSAFEOUTBOUNDHOSTPORTS
value: "udp:53, udp:67, tcp:179"
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
<%- if @pod_network_ipversion == 4 -%>
- name: IP
value: "autodetect"
- name: IP_AUTODETECTION_METHOD
value: "can-reach=<%= @apiserver_advertise_address %>"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "<%= @pod_network_cidr %>"
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "true"
<%- else -%>
- name: IP
value: "none"
<%- end -%>
<%- if @pod_network_ipversion == 6 -%>
- name: IP6
value: "autodetect"
- name: IP6_AUTODETECTION_METHOD
value: "can-reach=<%= @apiserver_advertise_address %>"
# The default IPv6 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV6POOL_CIDR
value: "<%= @pod_network_cidr %>"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "true"
<%- else -%>
- name: IP6
value: "none"
<%- end -%>
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
<%- if @pod_network_ipversion == 6 -%>
# Enable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "true"
- name: CALICO_ROUTER_ID
value: "hash"
<%- else -%>
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
<%- end -%>
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -bird-ready
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
volumes:
# Used by node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest deploys the Calico node controller.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: "<%= @quay_registry %>/calico/kube-controllers:v3.6.2"
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml

View File

@ -1,2 +1,2 @@
# Overrides config file for kubelet
KUBELET_EXTRA_ARGS=--node-ip=<%= @node_ip %> <%= @k8s_cpu_manager_opts %>
KUBELET_EXTRA_ARGS=--cni-bin-dir=<%= @k8s_cni_bin_dir %> --node-ip=<%= @node_ip %> <%= @k8s_cpu_manager_opts %>

View File

@ -1,187 +0,0 @@
# Multus Version v3.2
# Based on:
# https://github.com/intel/multus-cni/blob/release-v3/images/multus-daemonset.yml
#
# The following modifications have been made:
#
# - The multus CNI configuration file has been explicitly specified to ensure
# it has a lower lexicographic order than the calico CNI configuration file.
#
# - The configMap has been modified to work with Calico rather than Flannel
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
properties:
config:
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"delegates": [
{
"cniVersion": "0.3.0",
"name": "k8s-pod-network",
"type": "calico",
"masterplugin": true,
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": 1500,
"ipam": {
"type": "calico-ipam",
<%- if @pod_network_ipversion == 4 -%>
"assign_ipv4": "true",
<%- else -%>
"assign_ipv4": "false",
<%- end -%>
<%- if @pod_network_ipversion == 6 -%>
"assign_ipv6": "true"
<%- else -%>
"assign_ipv6": "false"
<%- end -%>
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
}
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
namespace: kube-system
labels:
tier: node
app: multus
spec:
template:
metadata:
labels:
tier: node
app: multus
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: <%= @docker_registry %>/nfvpe/multus:v3.2
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- -cex
- |
#!/bin/bash
sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/05-multus.conf > /usr/src/multus-cni/images/05-multus.conf
<%- if @pod_network_ipversion == 6 -%>
sed -i 's#//\${KUBERNETES_SERVICE_HOST}#//\[\${KUBERNETES_SERVICE_HOST}\]#' /entrypoint.sh
<%- end -%>
/entrypoint.sh --multus-conf-file=/usr/src/multus-cni/images/05-multus.conf
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 05-multus.conf

View File

@ -1,45 +0,0 @@
# SRIOV-CNI Release v1
# Based on:
# https://github.com/intel/sriov-cni/blob/master/images/sriov-cni-daemonset.yaml
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-sriov-cni-ds-amd64
namespace: kube-system
labels:
tier: node
app: sriov-cni
spec:
template:
metadata:
labels:
tier: node
app: sriov-cni
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: kube-sriov-cni
image: <%= @docker_registry %>/starlingx/k8s-cni-sriov:master-centos-stable-latest
securityContext:
privileged: true
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
volumes:
- name: cnibin
hostPath:
path: /opt/cni/bin

View File

@ -1,68 +0,0 @@
# SRIOV device CNI plugin
# Based on:
# https://github.com/intel/sriov-cni/blob/master/images/sriov-cni-daemonset.yaml
#
# The following modifications have been made:
#
# - A nodeSelector of 'sriovdp' has been added to ensure the sriov device plugin
# pods only run on appropriately labelled nodes.
# - The config hostPath is explicitly set to 'File'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sriov-device-plugin
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-sriov-device-plugin-amd64
namespace: kube-system
labels:
tier: node
app: sriovdp
spec:
template:
metadata:
labels:
tier: node
app: sriovdp
spec:
nodeSelector:
beta.kubernetes.io/arch: amd64
sriovdp: enabled
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: sriov-device-plugin
containers:
- name: kube-sriovdp
image: <%= @docker_registry %>/starlingx/k8s-plugins-sriov-network-device:master-centos-stable-latest
args:
- --log-level=10
securityContext:
privileged: false
volumeMounts:
- name: devicesock
mountPath: /var/lib/kubelet/device-plugins/
readOnly: false
- name: sysfs
mountPath: /sys
readOnly: true
- name: config
mountPath: /etc/pcidp/config.json
readOnly: true
volumes:
- name: devicesock
hostPath:
path: /var/lib/kubelet/device-plugins/
- name: sysfs
hostPath:
path: /sys
- name: config
hostPath:
path: /etc/pcidp/config.json
type: File