Uprev CNI images for k8s v1.24-v1.28

This commit uprevs the container networking images as follows:

calico: v3.25.0 -> v3.26.4
multus: v3.9.3 -> v4.0.2
sriov-cni: v2.7.0 (unchanged)
sriov-device-plugin: v3.5.1 -> v3.6.2

The following changes have been made:
- Create a new directory for k8s 1.28.4
- Symlink the k8s 1.24-1.27 directories to the new k8s 1.28.4 directory
  (a layout sketch follows this list)
- Apply the StarlingX custom changes on top of the upstream base manifests
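
The directory/symlink arrangement follows the pattern sketched below. This is
an illustration only: the directory names and their location in the playbook
tree are placeholders for the actual versioned paths, not copied from this
change.

    # Illustrative sketch of the layout change described above; directory
    # names are placeholders for the real versioned manifest directories.
    mkdir -p k8s-1.28.4                  # new directory holding the uprev'd manifests
    for old in k8s-1.24 k8s-1.25 k8s-1.26 k8s-1.27; do
        rm -rf "$old"                    # drop the per-version copy
        ln -s k8s-1.28.4 "$old"          # point the old version at the 1.28.4 content
    done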

Testing:

- Ensure uprev'd images work on a fresh install with k8s 1.28.4
- Successful system deployment (bootstrap and unlock)
- Perform several networking operations on k8s 1.28.4 (a connectivity
  smoke-test sketch follows this list):
  - Calico:
    * pod -> pod connectivity
    * pod -> service connectivity
    * ingress connectivity
    * IPAM testing
  - Multus / SR-IOV verification:
    * Run the SR-IOV automated tests with a full pass
  - Test IPv4 and IPv6:
    * Ensure all pods come up under each environment
    * Test pod -> pod connectivity on both
  - Test manual upgrade from k8s 1.24 -> 1.28
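
The pod and service connectivity checks above can be reproduced with a quick
smoke test along the following lines. This is a sketch only: the pod names,
image, and service name are placeholders, not the actual automated test assets
used for verification.

    # Hypothetical connectivity smoke test (names are placeholders).
    kubectl run pingtest-a --image=busybox --restart=Never -- sleep 3600
    kubectl run pingtest-b --image=busybox --restart=Never -- sleep 3600
    kubectl wait pod/pingtest-a pod/pingtest-b --for=condition=Ready --timeout=120s

    # pod -> pod: ping the peer pod's Calico-assigned IP
    PEER_IP=$(kubectl get pod pingtest-b -o jsonpath='{.status.podIP}')
    kubectl exec pingtest-a -- ping -c 3 "$PEER_IP"

    # pod -> service: reach a ClusterIP service by DNS name (placeholder service)
    kubectl exec pingtest-a -- wget -qO- -T 5 http://my-svc.default.svc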

Story: 2010639
Task: 49710

Change-Id: Ife456d63043825476c17e91e310d8283f829f7f4
Signed-off-by: Mohammad Issa <mohammad.issa@windriver.com>
Mohammad Issa 2024-03-13 17:19:30 +00:00
parent 83a8421dc4
commit 32f6cb1817
19 changed files with 278 additions and 5389 deletions


@ -1,297 +0,0 @@
---
# Multus Version v3.9.2
# Based on:
# https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/v3.9.2/deployments/
# multus-daemonset.yml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/multus-cni/blob/v3.9.2/LICENSE
#
# The following modifications have been made:
#
# - The multus CNI configuration file has been explicitly specified to ensure
# it has a lower lexicographic order than the calico CNI configuration file.
# - The configMap has been modified to work with Calico rather than Flannel
# - The tuning plugin is used to update sysctl tcp_keepalive timers.
# - The portmap plugin is enabled to allow forwarding from one or more ports
# on the host to the container
# - The cnibin volume hostPath is made variable
# - An updateStrategy was added to the DaemonSet spec to allow controlled template
# updates of pods with "kubectl apply".
# - The attributes confDir, cniDir and binDir are added to the configmap of
# multus-cni-config.
# - Due to the limitations on resource tracking and CPU usage in Kubernetes,
# platform pod's CPU requests are set to zero and must not request CPU resources.
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the
latest internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config.v1
namespace: kube-system
labels:
tier: node
app: multus
data:
# NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
# In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
# change the "args" line below from
# - "--multus-conf-file=auto"
# to:
# "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
# Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
# /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"cniVersion": "0.3.1",
"confDir": "/etc/cni/net.d",
"cniDir": "/var/lib/cni/multus",
"binDir": "/var/opt/cni/bin",
"logFile": "/var/log/multus.log",
"logLevel": "debug",
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "chain",
"plugins": [
{
"cniVersion": "0.3.1",
"name": "k8s-pod-network",
"type": "calico",
"masterplugin": true,
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": 1500,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "{{ "true" if cluster_network_ipv4 else "false" }}",
"assign_ipv6": "{{ "true" if cluster_network_ipv6 else "false" }}"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
}
},
{
"name": "sysctl-tuning",
"type": "tuning",
"sysctl": {
"net.ipv4.tcp_keepalive_intvl": "1",
"net.ipv4.tcp_keepalive_probes": "5",
"net.ipv4.tcp_keepalive_time": "5"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {
"portMappings": true
}
}
]
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
- operator: Exists
effect: NoExecute
serviceAccountName: multus
imagePullSecrets:
- name: registry-local-secret
containers:
- name: kube-multus
image: "{{ local_registry }}/{{ multus_img }}"
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- -cex
- |
#!/bin/bash
sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/05-multus.conf > /usr/src/multus-cni/05-multus.conf
{% if cluster_network_ipv6 -%}
sed -i 's#//\${KUBERNETES_SERVICE_HOST}#//\[\${KUBERNETES_SERVICE_HOST}\]#' /entrypoint.sh
{% endif -%}
/entrypoint.sh --multus-conf-file=/usr/src/multus-cni/05-multus.conf
resources:
requests:
memory: "50Mi"
limits:
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
initContainers:
- name: install-multus-binary
image: "{{ local_registry }}/{{ multus_img }}"
command:
- "cp"
- "/usr/src/multus-cni/bin/multus"
- "/host/opt/cni/bin/multus"
resources:
requests:
cpu: 0
memory: "15Mi"
securityContext:
privileged: true
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
mountPropagation: Bidirectional
terminationGracePeriodSeconds: 10
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: {{ kubelet_cni_bin_dir }}
- name: multus-cfg
configMap:
name: multus-cni-config.v1
items:
- key: cni-conf.json
path: 05-multus.conf


@ -1,77 +0,0 @@
# SRIOV-CNI Release v2
# Based on:
# https://raw.githubusercontent.com/k8snetworkplumbingwg/sriov-cni/v2.6.3/images/k8s-v1.16/
# sriov-cni-daemonset.yaml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/sriov-cni/blob/v2.6.3/LICENSE
#
# The following modifications have been made:
#
# - The daemonset is modified to tolerate all NoSchedule taints
# - The cnibin volume hostPath is made variable
# - An updateStrategy was added to the DaemonSet spec to allow controlled template
# updates of pods with "kubectl apply".
# - The image is set to a stable starlingX version
# - The 'imagePullPolicy: Never' is omitted
# - For k8s 1.19, the matchLabels are the same as the k8s 1.18 labels to
# allow a rolling update to succeed.
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-sriov-cni-ds-amd64
namespace: kube-system
labels:
tier: node
app: sriov-cni
spec:
selector:
matchLabels:
tier: node
app: sriov-cni
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
name: sriov-cni
tier: node
app: sriov-cni
spec:
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
imagePullSecrets:
- name: registry-local-secret
containers:
- name: kube-sriov-cni
image: "{{ local_registry }}/{{ sriov_cni_img }}"
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
resources:
requests:
memory: "50Mi"
limits:
memory: "50Mi"
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
volumes:
- name: cnibin
hostPath:
path: {{ kubelet_cni_bin_dir }}


@ -1,111 +0,0 @@
# SRIOV device CNI plugin version 3.5.1
# Based on:
# https://raw.githubusercontent.com/k8snetworkplumbingwg/sriov-network-device-plugin/v3.5.1/
# deployments/k8s-v1.16/sriovdp-daemonset.yaml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin/blob/v3.5.1/LICENSE
#
# The following modifications have been made:
#
# - A nodeSelector of 'sriovdp' has been added to ensure the sriov device plugin
# pods only run on appropriately labelled nodes.
# - The config hostPath is explicitly set to 'File'
# - The daemonset is modified to tolerate all NoSchedule taints
# - An updateStrategy was added to the DaemonSet spec to allow controlled template
# updates of pods with "kubectl apply".
# - The image is set to a stable starlingX version
# - The default configMap is not used. Rather, a hostPath to the config.json file
# is used, as resources are populated and based on datanetwork names.
# - For k8s 1.19, the matchLabels are the same as the k8s 1.18 labels to
# allow a rolling update to succeed.
# - Set CPU requests to 0
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sriov-device-plugin
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-sriov-device-plugin-amd64
namespace: kube-system
labels:
tier: node
app: sriovdp
spec:
selector:
matchLabels:
tier: node
app: sriovdp
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
name: sriov-device-plugin
tier: node
app: sriovdp
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
sriovdp: enabled
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: sriov-device-plugin
imagePullSecrets:
- name: registry-local-secret
containers:
- name: kube-sriovdp
image: "{{ local_registry }}/{{ sriov_network_device_img }}"
imagePullPolicy: IfNotPresent
args:
- --log-dir=sriovdp
- --log-level=10
securityContext:
privileged: true
resources:
requests:
cpu: 0
memory: "40Mi"
limits:
cpu: 1
memory: "200Mi"
volumeMounts:
- name: devicesock
mountPath: /var/lib/kubelet/device-plugins/
readOnly: false
- name: log
mountPath: /var/log
- name: config
mountPath: /etc/pcidp/config.json
readOnly: true
- name: device-info
mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp
volumes:
- name: devicesock
hostPath:
path: /var/lib/kubelet/device-plugins/
- name: log
hostPath:
path: /var/log
- name: device-info
hostPath:
path: /var/run/k8s.cni.cncf.io/devinfo/dp
type: DirectoryOrCreate
- name: config
hostPath:
path: /etc/pcidp/config.json
type: File


@ -1,10 +1,10 @@
---
# Calico Version v3.25.0
# Calico Version v3.26.4
# Based off:
# https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml
# https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico.yaml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/projectcalico/calico/blob/v3.25.0/calico/LICENSE
# https://github.com/projectcalico/calico/blob/v3.26.4/calico/LICENSE
#
# The following modifications have been made:
#
@ -16,7 +16,7 @@
# - Do not use default ippools. Create them explicitly for ipv4 / ipv6
# - Set CPU requests to 0
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -71,6 +71,13 @@ metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
@ -317,6 +324,130 @@ status:
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: (devel)
creationTimestamp: null
name: bgpfilters.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPFilter
listKind: BGPFilterList
plural: bgpfilters
singular: bgpfilter
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPFilterSpec contains the IPv4 and IPv6 filter rules of
the BGP Filter.
properties:
exportV4:
description: The ordered set of IPv4 BGPFilter rules acting on exporting
routes to a peer.
items:
description: BGPFilterRuleV4 defines a BGP filter rule consisting
a single IPv4 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
exportV6:
description: The ordered set of IPv6 BGPFilter rules acting on exporting
routes to a peer.
items:
description: BGPFilterRuleV6 defines a BGP filter rule consisting
a single IPv6 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
importV4:
description: The ordered set of IPv4 BGPFilter rules acting on importing
routes from a peer.
items:
description: BGPFilterRuleV4 defines a BGP filter rule consisting
a single IPv4 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
importV6:
description: The ordered set of IPv6 BGPFilter rules acting on importing
routes from a peer.
items:
description: BGPFilterRuleV6 defines a BGP filter rule consisting
a single IPv6 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
@ -352,6 +483,11 @@ spec:
description: The AS Number of the peer.
format: int32
type: integer
filters:
description: The ordered set of BGPFilters applied on this BGP peer.
items:
type: string
type: array
keepOriginalNextHop:
description: Option to keep the original nexthop field when routes
are sent to a BGP Peer. Setting "true" configures the selected BGP
@ -898,6 +1034,13 @@ spec:
connections. The only reason to disable it is for debugging purposes. [Default:
true]'
type: boolean
bpfDSROptoutCIDRs:
description: BPFDSROptoutCIDRs is a list of CIDRs which are excluded
from DSR. That is, clients in those CIDRs will access nodeports
as if BPFExternalServiceMode was set to Tunnel.
items:
type: string
type: array
bpfDataIfacePattern:
description: BPFDataIfacePattern is a regular expression that controls
which interfaces Felix should attach BPF programs to in order to
@ -921,7 +1064,7 @@ spec:
description: 'BPFEnforceRPF enforce strict RPF on all host interfaces
with BPF programs regardless of what is the per-interfaces or global
setting. Possible values are Disabled, Strict or Loose. [Default:
Strict]'
Loose]'
type: string
bpfExtToServiceConnmark:
description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit
@ -1186,7 +1329,7 @@ spec:
type: integer
healthTimeoutOverrides:
description: HealthTimeoutOverrides allows the internal watchdog timeouts
of individual subcomponents to be overriden. This is useful for
of individual subcomponents to be overridden. This is useful for
working around "false positive" liveness timeouts that can occur
in particularly stressful workloads or if CPU is constrained. For
a list of active subcomponents, see Felix's logs.
@ -1246,6 +1389,12 @@ spec:
type: string
iptablesFilterAllowAction:
type: string
iptablesFilterDenyAction:
description: IptablesFilterDenyAction controls what happens to traffic
that is denied by network policy. By default Calico blocks traffic
with an iptables "DROP" action. If you want to use "REJECT" action
instead you can configure it in here.
type: string
iptablesLockFilePath:
description: 'IptablesLockFilePath is the location of the iptables
lock file. You may need to change this if the lock file is not in
@ -4256,7 +4405,7 @@ rules:
resources:
- serviceaccounts/token
resourceNames:
- calico-node
- calico-cni-plugin
verbs:
- create
# The CNI plugin needs to get pods, nodes, and namespaces.
@ -4273,7 +4422,7 @@ rules:
resources:
- endpointslices
verbs:
- watch
- watch
- list
- apiGroups: [""]
resources:
@ -4327,6 +4476,7 @@ rules:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- bgpfilters
- globalbgpconfigs
- bgpconfigurations
- ippools
@ -4410,6 +4560,41 @@ rules:
verbs:
- get
---
# Source: calico/templates/calico-node-rbac.yaml
# CNI cluster role
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-cni-plugin
rules:
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
- clusterinformations
- ippools
- ipreservations
- ipamconfigs
verbs:
- get
- list
- create
- update
- delete
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@ -4438,6 +4623,20 @@ subjects:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
@ -4699,7 +4898,7 @@ spec:
privileged: true
resources:
requests:
cpu: 0
cpu: 0
lifecycle:
preStop:
exec:
@ -4894,4 +5093,3 @@ spec:
---
# Source: calico/templates/configure-canal.yaml


@ -1,11 +1,11 @@
---
# Multus Version v3.9.3
# Multus Version v4.0.2
# Based on:
# https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/v3.9.3/deployments/
# https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/v4.0.2/deployments/
# multus-daemonset.yml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/multus-cni/blob/v3.9.3/LICENSE
# https://github.com/k8snetworkplumbingwg/multus-cni/blob/v4.0.2/LICENSE
#
# The following modifications have been made:
#
@ -22,8 +22,12 @@
# multus-cni-config.
# - Due to the limitations on resource tracking and CPU usage in Kubernetes,
# platform pod's CPU requests are set to zero and must not request CPU resources.
# - multus v4.0.2 changes the thin container base image to distroless,
# hence --multus-conf-file=auto is used instead of a custom conf file as done previously.
# - multus v4.0.2 has an issue with a nested delegate misconfiguration in the conf file;
# an initContainer is added that deletes the multus CNI configuration on startup.
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -235,21 +239,13 @@ spec:
containers:
- name: kube-multus
image: "{{ local_registry }}/{{ multus_img }}"
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /bin/bash
- -cex
- |
#!/bin/bash
sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/05-multus.conf > /usr/src/multus-cni/05-multus.conf
{% if cluster_network_ipv6 -%}
sed -i 's#//\${KUBERNETES_SERVICE_HOST}#//\[\${KUBERNETES_SERVICE_HOST}\]#' /entrypoint.sh
{% endif -%}
/entrypoint.sh --multus-conf-file=/usr/src/multus-cni/05-multus.conf
command: ["/thin_entrypoint"]
args:
- "--multus-conf-file=auto"
- "--multus-autoconfig-dir=/host/etc/cni/net.d"
- "--cni-conf-dir=/host/etc/cni/net.d"
- "--multus-log-file=/var/log/multus.log"
- "--multus-log-level=debug"
resources:
requests:
memory: "50Mi"
@ -267,10 +263,10 @@ spec:
initContainers:
- name: install-multus-binary
image: "{{ local_registry }}/{{ multus_img }}"
command:
- "cp"
- "/usr/src/multus-cni/bin/multus"
- "/host/opt/cni/bin/multus"
command: ["/install_multus"]
args:
- "--type"
- "thin"
resources:
requests:
cpu: "0"
@ -281,6 +277,18 @@ spec:
- name: cnibin
mountPath: /host/opt/cni/bin
mountPropagation: Bidirectional
# 00-multus.conf is generated with a nested delegate misconfiguration.
# Use initcontainer 'delete-multus-conf' to re-generate 00-multus.conf on startup.
# This upstream bug was found in v4.0.2.
- name: delete-multus-conf
image: "alpine"
command: ["rm"]
args:
- "-f"
- "/host/etc/cni/net.d/00-multus.conf"
volumeMounts:
- name: cni
mountPath: "/host/etc/cni/net.d"
terminationGracePeriodSeconds: 10
volumes:
- name: cni
@ -294,4 +302,4 @@ spec:
name: multus-cni-config.v1
items:
- key: cni-conf.json
path: 05-multus.conf
path: 00-multus.conf


@ -1,4 +1,4 @@
# SRIOV-CNI Release v2
# SRIOV-CNI Version v2.7.0
# Based on:
# https://raw.githubusercontent.com/k8snetworkplumbingwg/sriov-cni/v2.7.0/images/k8s-v1.16/
# sriov-cni-daemonset.yaml
@ -17,7 +17,7 @@
# - For k8s 1.19, the matchLabels are the same as the k8s 1.18 labels to
# allow a rolling update to succeed.
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -75,3 +75,4 @@ spec:
- name: cnibin
hostPath:
path: {{ kubelet_cni_bin_dir }}


@ -1,10 +1,10 @@
# SRIOV device CNI plugin version 3.5.1
# SRIOV device CNI plugin version 3.6.2
# Based on:
# https://raw.githubusercontent.com/k8snetworkplumbingwg/sriov-network-device-plugin/v3.5.1/
# deployments/k8s-v1.16/sriovdp-daemonset.yaml
# https://raw.githubusercontent.com/k8snetworkplumbingwg/sriov-network-device-plugin/v3.6.2/
# deployments/sriovdp-daemonset.yaml
#
# This file is licensed under Apache 2.0. You can obtain a copy of the license at:
# https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin/blob/v3.5.1/LICENSE
# https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin/blob/v3.6.2/LICENSE
#
# The following modifications have been made:
#
@ -21,7 +21,7 @@
# allow a rolling update to succeed.
# - Set CPU requests to 0
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -59,7 +59,7 @@ spec:
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
kubernetes.io/arch: amd64
sriovdp: enabled
tolerations:
- operator: Exists
@ -87,6 +87,9 @@ spec:
- name: devicesock
mountPath: /var/lib/kubelet/device-plugins/
readOnly: false
- name: plugins-registry
mountPath: /var/lib/kubelet/plugins_registry/
readOnly: false
- name: log
mountPath: /var/log
- name: config
@ -98,6 +101,9 @@ spec:
- name: devicesock
hostPath:
path: /var/lib/kubelet/device-plugins/
- name: plugins-registry
hostPath:
path: /var/lib/kubelet/plugins_registry/
- name: log
hostPath:
path: /var/log
@ -109,3 +115,4 @@ spec:
hostPath:
path: /etc/pcidp/config.json
type: File


@ -2,12 +2,12 @@
# System images that are pre-pulled and pushed to local registry
n3000_opae_img: docker.io/starlingx/n3000-opae:stx.8.0-v1.0.2
kubernetes_entrypoint_img: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
calico_cni_img: quay.io/calico/cni:v3.24.0
calico_node_img: quay.io/calico/node:v3.24.0
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.24.0
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.2
sriov_cni_img: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.6.3
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.5.1
calico_cni_img: quay.io/calico/cni:v3.26.4
calico_node_img: quay.io/calico/node:v3.26.4
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.26.4
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.0.2
sriov_cni_img: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.7.0
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.6.2
# Nginx images
nginx_ingress_controller_img: registry.k8s.io/ingress-nginx/controller:v1.9.3
nginx_kube_webhook_certgen_img: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0


@ -2,12 +2,12 @@
# System images that are pre-pulled and pushed to local registry
n3000_opae_img: docker.io/starlingx/n3000-opae:stx.8.0-v1.0.2
kubernetes_entrypoint_img: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
calico_cni_img: quay.io/calico/cni:v3.25.0
calico_node_img: quay.io/calico/node:v3.25.0
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.25.0
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3
calico_cni_img: quay.io/calico/cni:v3.26.4
calico_node_img: quay.io/calico/node:v3.26.4
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.26.4
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.0.2
sriov_cni_img: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.7.0
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.5.1
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.6.2
intel_qat_plugin_img: docker.io/intel/intel-qat-plugin:0.26.0
intel_gpu_plugin_img: docker.io/intel/intel-gpu-plugin:0.26.0
intel_gpu_initcontainer_img: docker.io/intel/intel-gpu-initcontainer:0.26.0


@ -2,12 +2,12 @@
# System images that are pre-pulled and pushed to local registry
n3000_opae_img: docker.io/starlingx/n3000-opae:stx.8.0-v1.0.2
kubernetes_entrypoint_img: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
calico_cni_img: quay.io/calico/cni:v3.25.0
calico_node_img: quay.io/calico/node:v3.25.0
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.25.0
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3
calico_cni_img: quay.io/calico/cni:v3.26.4
calico_node_img: quay.io/calico/node:v3.26.4
calico_kube_controllers_img: quay.io/calico/kube-controllers:v3.26.4
multus_img: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.0.2
sriov_cni_img: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.7.0
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.5.1
sriov_network_device_img: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.6.2
intel_qat_plugin_img: docker.io/intel/intel-qat-plugin:0.26.0
intel_gpu_plugin_img: docker.io/intel/intel-gpu-plugin:0.26.0
intel_gpu_initcontainer_img: docker.io/intel/intel-gpu-initcontainer:0.26.0