Use CRI-O with kubeadm.

Before switching to kubeadm we used a manual approach for installing
all the bits, with an option for switching the container runtime between
Docker and CRI-O. With this patch we regain that ability: simply set the
appropriate container engine variable for devstack-plugin-container to
use either CRI-O or the default Docker.

Depends-On: https://review.opendev.org/c/openstack/devstack-plugin-container/+/817231
Change-Id: I273888a7428611b40802dc5dd53fcee864ce43da
This commit is contained in:
Roman Dobosz 2021-10-21 15:01:35 +02:00
parent 3b9312df7c
commit fb994ecddb
8 changed files with 171 additions and 53 deletions

View File

@ -18,6 +18,7 @@
jobs:
- kuryr-kubernetes-tempest
- kuryr-kubernetes-tempest-defaults
- kuryr-kubernetes-tempest-crio
- kuryr-kubernetes-tempest-systemd
- kuryr-kubernetes-tempest-lower-constraints
- kuryr-kubernetes-tempest-multinode
@ -36,7 +37,6 @@
- kuryr-kubernetes-tempest-l2
- kuryr-kubernetes-tempest-pools-namespace
- kuryr-kubernetes-tempest-multinode-ha
- kuryr-kubernetes-tempest-crio
- kuryr-kubernetes-tempest-dual-stack
- project-template:

View File

@ -187,7 +187,12 @@
vars:
devstack_localrc:
CONTAINER_ENGINE: crio
voting: false
CRIO_VERSION: "1.20"
zuul_copy_output:
'{{ devstack_log_dir }}/crio': 'logs'
post-run:
- playbooks/copy-crio-logs.yaml
voting: true
- job:
name: kuryr-kubernetes-tempest-amphora

View File

@ -38,12 +38,22 @@ distribution.")
}
function kubeadm_init {
local cluster_ip_ranges
local output_dir="${DATA_DIR}/kuryr-kubernetes"
local cgroup_driver
local cri_socket
mkdir -p "${output_dir}"
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
local crio_conf="/etc/crio/crio.conf"
cgroup_driver=$(iniget ${crio_conf} crio.runtime cgroup_manager)
cri_socket="unix:///var/run/crio/crio.sock"
else
# docker is used
cgroup_driver=$(docker info -f '{{.CgroupDriver}}')
cri_socket="/var/run/dockershim.sock"
fi
cluster_ip_ranges=()
for service_subnet_id in ${KURYR_SERVICE_SUBNETS_IDS[@]}; do
service_cidr=$(openstack --os-cloud devstack-admin \
@ -85,17 +95,17 @@ scheduler:
leader-elect: "false"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- token: "${KURYR_K8S_TOKEN}"
ttl: 0s
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: "${K8S_API_SERVER_IP}"
bindPort: ${K8S_API_SERVER_PORT}
nodeRegistration:
criSocket: "$cri_socket"
kubeletExtraArgs:
cgroup-driver: "$(docker info -f '{{.CgroupDriver}}')"
cni-bin-dir: "$CNI_BIN_DIR"
cni-bin-dir: "$CNI_PLUGIN_DIR"
cni-conf-dir: "$CNI_CONF_DIR"
enable-server: "true"
taints:
@ -106,6 +116,7 @@ kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $cgroup_driver
EOF
args="--config ${output_dir}/kubeadm-init.yaml"
# NOTE(gryf): skip installing kube proxy, kuryr will handle services.
@ -126,9 +137,20 @@ EOF
function kubeadm_join {
local output_dir="${DATA_DIR}/kuryr-kubernetes"
local cgroup_driver
local cri_socket
mkdir -p "${output_dir}"
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
local crio_conf="/etc/crio/crio.conf"
cgroup_driver=$(iniget ${crio_conf} crio.runtime cgroup_manager)
cri_socket="unix:///var/run/crio/crio.sock"
else
# docker is used
cgroup_driver=$(docker info -f '{{.CgroupDriver}}')
cri_socket="/var/run/dockershim.sock"
fi
cluster_ip_ranges=()
for service_subnet_id in ${KURYR_SERVICE_SUBNETS_IDS[@]}; do
service_cidr=$(openstack --os-cloud devstack-admin \
@ -150,13 +172,20 @@ discovery:
tlsBootstrapToken: "${KURYR_K8S_TOKEN}"
kind: JoinConfiguration
nodeRegistration:
criSocket: "$cri_socket"
kubeletExtraArgs:
cgroup-driver: "$(docker info -f '{{.CgroupDriver}}')"
cni-bin-dir: "$CNI_BIN_DIR"
cni-bin-dir: "$CNI_PLUGIN_DIR"
cni-conf-dir: "$CNI_CONF_DIR"
enable-server: "true"
taints:
[]
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $cgroup_driver
EOF
sudo -E kubeadm join --ignore-preflight-errors Swap \
--config ${output_dir}/kubeadm-join.yaml

View File

@ -302,6 +302,7 @@ function build_kuryr_container_image {
local target=$1 # controller or cni
local build_args
local build_dir
local tag="kuryr/${target}"
build_dir="${DEST}/kuryr-kubernetes"
pushd "$build_dir"
@ -311,17 +312,19 @@ function build_kuryr_container_image {
`"/opt/kuryr-kubernetes/lower-constraints.txt"
fi
# Build images
# FIXME(dulek): Until https://github.com/containers/buildah/issues/1206 is
# resolved instead of podman we need to use buildah directly,
# hence this awful if clause.
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
sudo buildah bud -t "docker.io/kuryr/${target}" \
-f "${target}.Dockerfile" .
else
container_runtime build -t "kuryr/${target}" \
-f "${target}.Dockerfile" ${build_args} .
# NOTE(gryf): for crio/podman we need to have it tagged with docker.io
# (or whatever registry), or we would need to setup one, otherwise the
# default tag would be 'localhost/kuryr/*' instead of 'kuryr/*' as in
# docker case (which by default becomes 'docker.io/kuryr/*' if no
# registry has been specified). Creating registry for just two images
# is a little bit of overkill, hence the trick with docker.io tag, and
# image pull policy set to "Never" on deployment definition, so that
# we assure for taking images that we built.
tag="docker.io/${tag}"
fi
container_runtime build -t "${tag}" -f "${target}.Dockerfile" \
${build_args} .
popd
}
@ -600,6 +603,7 @@ function generate_cni_daemon_set {
cni_health_server_port=$2
cni_bin_dir=${3:-/opt/cni/bin}
cni_conf_dir=${4:-/etc/cni/net.d}
local var_run=${VAR_RUN_PATH:-/var/run}
mkdir -p "$output_dir"
rm -f "${output_dir}/cni_ds.yml"
cat >> "${output_dir}/cni_ds.yml" << EOF
@ -653,14 +657,31 @@ spec:
mountPath: /etc/cni/net.d
- name: config-volume
mountPath: /etc/kuryr
EOF
if [ "$CONTAINER_ENGINE" != "crio" ]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: proc
mountPath: /host_proc
EOF
fi
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: var-pci
mountPath: /var/pci_address
- name: var-run
mountPath: /var/run
mountPropagation: HostToContainer
EOF
# NOTE(gryf): assuming the --namespaces-dir parameter would not be used,
# otherwise /var/run/$crio_netns_path is all wrong
if [ "$CONTAINER_ENGINE" = "crio" ] && \
[ "${VAR_RUN_PATH}" != "/var/run" ]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: netns
mountPath: /var/run/netns
mountPropagation: HostToContainer
EOF
fi
cat >> "${output_dir}/cni_ds.yml" << EOF
readinessProbe:
httpGet:
@ -684,24 +705,28 @@ EOF
- name: config-volume
configMap:
name: kuryr-config
- name: var-run
hostPath:
path: ${var_run}
EOF
if [[ "$CONTAINER_ENGINE" != "crio" ]]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: proc
hostPath:
path: /proc
EOF
fi
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: var-pci
hostPath:
path: /var/pci_address
EOF
if [[ -n "$VAR_RUN_PATH" ]]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: var-run
if [ "${CONTAINER_ENGINE}" = "crio" ] && \
[ "${VAR_RUN_PATH}" != "/var/run" ]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: netns
hostPath:
path: ${VAR_RUN_PATH}
EOF
else
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: var-run
hostPath:
path: /var/run
path: /var/run/netns
EOF
fi
}
@ -917,7 +942,7 @@ function build_install_kuryr_cni {
pushd "${KURYR_HOME}/kuryr_cni" || exit 1
hack/build-go.sh
sudo install -o "$STACK_USER" -m 0555 -D bin/kuryr-cni \
"${CNI_BIN_DIR}/kuryr-cni"
"${CNI_PLUGIN_DIR}/kuryr-cni"
popd
}
@ -943,6 +968,15 @@ function _create_kuryr_lock_dir {
function configure_kuryr {
local dir
if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
# According of the documentation we need those kernel modules for
# CRI-O. They might already be loaded by neutron, so don't fail on it.
# https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cri-o
sudo modprobe overlay || true
sudo modprobe br_netfilter || true
fi
sudo install -d -o "$STACK_USER" "$KURYR_CONFIG_DIR"
"${KURYR_HOME}/tools/generate_config_file_samples.sh"
sudo install -o "$STACK_USER" -m 640 -D \
@ -982,10 +1016,12 @@ function configure_kuryr {
iniset "$KURYR_CONFIG" vif_plug_ovs_privileged helper_command privsep-helper
iniset "$KURYR_CONFIG" vif_plug_linux_bridge_privileged helper_command privsep-helper
# When running kuryr-daemon or CNI in container we need to set up
# some configs.
iniset "$KURYR_CONFIG" cni_daemon docker_mode True
iniset "$KURYR_CONFIG" cni_daemon netns_proc_dir "/host_proc"
if [ "${CONTAINER_ENGINE}" = "docker" ]; then
# When running kuryr-daemon or CNI in container we need to set up
# some configs.
iniset "$KURYR_CONFIG" cni_daemon docker_mode True
iniset "$KURYR_CONFIG" cni_daemon netns_proc_dir "/host_proc"
fi
else
iniset "$KURYR_CONFIG" oslo_concurrency lock_path "$KURYR_LOCK_DIR"
_create_kuryr_lock_dir
@ -1051,7 +1087,7 @@ function _generate_containerized_kuryr_resources {
generate_kuryr_certificates_secret $output_dir $SSL_BUNDLE_FILE
generate_kuryr_service_account $output_dir
generate_controller_deployment $output_dir $KURYR_HEALTH_SERVER_PORT $KURYR_CONTROLLER_HA
generate_cni_daemon_set $output_dir $KURYR_CNI_HEALTH_SERVER_PORT $CNI_BIN_DIR $CNI_CONF_DIR
generate_cni_daemon_set $output_dir $KURYR_CNI_HEALTH_SERVER_PORT $CNI_PLUGIN_DIR $CNI_CONF_DIR
}
function run_containerized_kuryr_resources {
@ -1579,9 +1615,9 @@ function configure_neutron_defaults {
}
function uninstall_kuryr_cni {
sudo rm "${CNI_BIN_DIR}/kuryr-cni"
if [ -z "$(ls -A ${CNI_BIN_DIR})" ]; then
sudo rm -fr "${CNI_BIN_DIR}"
sudo rm "${CNI_PLUGIN_DIR}/kuryr-cni"
if [ -z "$(ls -A ${CNI_PLUGIN_DIR})" ]; then
sudo rm -fr "${CNI_PLUGIN_DIR}"
fi
}

View File

@ -1,5 +1,5 @@
KURYR_HOME=${KURYR_HOME:-$DEST/kuryr-kubernetes}
CNI_BIN_DIR=${CNI_BIN_DIR:-/opt/cni/bin}
CNI_PLUGIN_DIR=${CNI_PLUGIN_DIR:-${CNI_BIN_DIR:-/opt/cni/bin}}
CNI_CONF_DIR=${CNI_CONF_DIR:-$DEST/cni/conf}
KURYR_CONFIG_DIR=${KURYR_CONFIG_DIR:-/etc/kuryr}

View File

@ -0,0 +1,14 @@
# Post-run playbook: collect CRI-O configuration and journal logs from the
# devstack node by invoking the repo's copy_crio_logs.sh helper, so Zuul
# picks them up with the rest of the job logs.
- hosts: all
  tasks:
    # Fall back to the conventional devstack location when the job did not
    # override devstack_base_dir.
    - set_fact:
        devstack_base_dir: /opt/stack
      when: devstack_base_dir is not defined
    - name: Copy CRI-O logs
      shell:
        cmd: "{{ devstack_base_dir }}/kuryr-kubernetes/tools/gate/copy_crio_logs.sh"
        executable: /bin/bash
        chdir: "{{ zuul.project.src_dir }}"
      environment:
        # The helper script reads this to locate the logs directory.
        DEVSTACK_BASE_DIR: "{{ devstack_base_dir }}"
      # Root is needed to read /etc/crio and the crio journal unit.
      become: true

29
tools/gate/copy_crio_logs.sh Executable file
View File

@ -0,0 +1,29 @@
#!/bin/bash -x
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.

# This script takes bits from devstack-gate/functions/cleanup_host in a
# more generic approach, so we don't need to actually run devstack on the
# node to clean up a host.

# Copy CRI-O config and logs. If CRI-O is installed there should be
# configuration present, since we modify it through devstack.
# NOTE(review): DEVSTACK_BASE_DIR is expected to be exported by the calling
# playbook; if unset, logs would land under /logs/crio.
if [ -d /etc/crio ]; then
    CRIO_LOG_DIR=${DEVSTACK_BASE_DIR}/logs/crio
    mkdir -p "${CRIO_LOG_DIR}/conf"
    # Capture the crio systemd unit journal into a plain text file.
    sudo journalctl -o short-precise --unit crio | \
        sudo tee "${CRIO_LOG_DIR}/crio_log.txt" > /dev/null
    sudo cp -a /etc/crio "${CRIO_LOG_DIR}/conf"
    # Make the collected files readable by the Zuul log uploader.
    sudo chown -R zuul:zuul "${CRIO_LOG_DIR}"
fi

View File

@ -25,20 +25,23 @@ mkdir -p ${K8S_LOG_DIR}
mkdir ${HOME}/.kube
sudo cp /opt/stack/.kube/config ${HOME}/.kube/
sudo chown ${USER}:${USER} ${HOME}/.kube/config
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get pods -o yaml --all-namespaces >> ${K8S_LOG_DIR}/pods.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get svc -o yaml --all-namespaces >> ${K8S_LOG_DIR}/services.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get cm -o yaml --all-namespaces >> ${K8S_LOG_DIR}/configmaps.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get deploy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/deployments.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get ds -o yaml --all-namespaces >> ${K8S_LOG_DIR}/daemonsets.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get nodes -o yaml --all-namespaces >> ${K8S_LOG_DIR}/nodes.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get ingress -o yaml --all-namespaces >> ${K8S_LOG_DIR}/ingress.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get namespaces -o yaml >> ${K8S_LOG_DIR}/namespaces.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnets -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnets_crds.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetworks -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworks_crds.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get endpoints -o yaml --all-namespaces >> ${K8S_LOG_DIR}/endpoints.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetpolicy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetpolicy_crds.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrport -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrport_crds.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetworkpolicy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworkpolicy_crds.txt
KCTL="/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config"
$KCTL get pods -o yaml --all-namespaces >> ${K8S_LOG_DIR}/pods.txt
$KCTL get svc -o yaml --all-namespaces >> ${K8S_LOG_DIR}/services.txt
$KCTL get cm -o yaml --all-namespaces >> ${K8S_LOG_DIR}/configmaps.txt
$KCTL get deploy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/deployments.txt
$KCTL get ds -o yaml --all-namespaces >> ${K8S_LOG_DIR}/daemonsets.txt
$KCTL get nodes -o yaml --all-namespaces >> ${K8S_LOG_DIR}/nodes.txt
$KCTL get ingress -o yaml --all-namespaces >> ${K8S_LOG_DIR}/ingress.txt
$KCTL get namespaces -o yaml >> ${K8S_LOG_DIR}/namespaces.txt
$KCTL get kuryrnets -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnets_crds.txt
$KCTL get kuryrnetworks -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworks_crds.txt
$KCTL get endpoints -o yaml --all-namespaces >> ${K8S_LOG_DIR}/endpoints.txt
$KCTL get kuryrnetpolicy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetpolicy_crds.txt
$KCTL get kuryrport -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrport_crds.txt
$KCTL get kuryrnetworkpolicy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworkpolicy_crds.txt
sudo journalctl -o short-precise --unit kubelet | sudo tee ${K8S_LOG_DIR}/kubelet_log.txt > /dev/null
# Kubernetes pods logs
mkdir -p ${K8S_LOG_DIR}/pod_logs
while read -r line
@ -48,9 +51,11 @@ do
containers=`/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config -n ${namespace} get pods ${name} -o jsonpath="{.spec.containers[*].name} {.spec.initContainers[*].name}"`
for container in ${containers}
do
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config logs -n ${namespace} -c ${container} ${name} >> ${K8S_LOG_DIR}/pod_logs/${namespace}-${name}-${container}.txt
/usr/bin/kubectl --kubeconfig=${HOME}/.kube/config logs -n ${namespace} -p -c ${container} ${name} >> ${K8S_LOG_DIR}/pod_logs/${namespace}-${name}-${container}-prev.txt
$KCTL logs -n ${namespace} -c ${container} ${name} >> ${K8S_LOG_DIR}/pod_logs/${namespace}-${name}-${container}.txt
$KCTL logs -n ${namespace} -p -c ${container} ${name} >> ${K8S_LOG_DIR}/pod_logs/${namespace}-${name}-${container}-prev.txt
done
done < <(/usr/bin/kubectl get pods -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace --all-namespaces | tail -n +2)
mkdir -p "${K8S_LOG_DIR}/kubernetes_conf"
sudo cp -a /etc/kubernetes/* "${K8S_LOG_DIR}/kubernetes_conf"
sudo chown -R zuul:zuul ${K8S_LOG_DIR}