cri-o support

This commit adds support for cri-o by changing the binary initially used
to run the CNI plugin to runc, falling back to docker only when runc is
not available.

DevStack support for installing and configuring Kubernetes with cri-o is
also added.

Implements: blueprint crio-support
Depends-On: Ib049d66058429e499f5d0932c4a749820bec73ff
Depends-On: Ic3c7d355a455298f43e37fb2aceddfd1e7eefaf2
Change-Id: I081edf0dbd4eb57826399c4820376381950080ed
Michał Dulko 2018-11-26 10:52:10 +01:00
parent 7ba64a87da
commit eecd44d335
5 changed files with 156 additions and 73 deletions


@@ -16,20 +16,39 @@ finder="
import json
import sys
mode = 'docker' if len(sys.argv) == 1 else sys.argv[1]
if mode == 'docker':
    label_key = 'Labels'
    id_key = 'Id'
else:
    label_key = 'annotations'
    id_key = 'id'
containers = json.load(sys.stdin)
# Loop over all the containers until we find the right one, then print it.
for container in containers:
    if ('Labels' in container and
            container['Labels'].get('io.kubernetes.pod.name') == '${KURYR_CNI_POD_NAME}' and
            container['Labels'].get('io.kubernetes.pod.namespace') == '${POD_NAMESPACE}' and
            container['Labels'].get('io.kubernetes.docker.type') == 'container'):
        print(container['Id'])
    if (label_key in container and
            container[label_key].get('io.kubernetes.pod.name') == '${KURYR_CNI_POD_NAME}' and
            container[label_key].get('io.kubernetes.pod.namespace') == '${POD_NAMESPACE}' and
            container[label_key].get('io.kubernetes.container.name') != 'POD'):
        print(container[id_key])
        break
"
# TODO(dulek): We might want to fetch the socket path from config.
CONTAINERID=\`curl --unix-socket /var/run/docker.sock http://v1.24/containers/json 2> /dev/null | python -c "\${finder}"\`
envs=(\$(env | grep ^CNI_))
if command -v runc > /dev/null; then
    # We have the runc binary, let's see if it works.
    CONTAINERID=\`runc list -f json 2> /dev/null | python -c "\${finder}" runc\`
    if [[ ! -z \${CONTAINERID} ]]; then
        exec runc exec \${envs[@]/#/--env } "\${CONTAINERID}" kuryr-cni --config-file /etc/kuryr/kuryr.conf
    fi
fi
# Fall back to using the Docker binary.
# TODO(dulek): We might want to fetch the socket path from config.
CONTAINERID=\`curl --unix-socket /var/run/docker.sock http://v1.24/containers/json 2> /dev/null | python -c "\${finder}" docker\`
docker exec \${envs[@]/#/--env } -i "\${CONTAINERID}" kuryr-cni --config-file /etc/kuryr/kuryr.conf
EOF
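
Both exec paths above rely on a compact bash idiom to forward all CNI_* variables into the container. A standalone sketch of how it expands (container id and variable values hypothetical):

export CNI_COMMAND=ADD CNI_IFNAME=eth0
envs=($(env | grep ^CNI_))
# ${envs[@]/#/--env } prefixes every array element with "--env "; left
# unquoted, word splitting then yields separate "--env" and "KEY=VALUE" words.
echo runc exec ${envs[@]/#/--env } some-container-id kuryr-cni
# prints (order may vary):
# runc exec --env CNI_COMMAND=ADD --env CNI_IFNAME=eth0 some-container-id kuryr-cni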


@@ -117,8 +117,8 @@ function get_container {
    fi
    image="${image_name}:${version}"
    if [ -z "$(docker images -q "$image")" ]; then
        docker pull "$image"
    if [ -z "$(container_runtime images -q "$image")" ]; then
        container_runtime pull "$image"
    fi
}
@@ -131,16 +131,17 @@ function run_container {
    # Runs a detached container and uses DevStack's run_process to monitor
    # its logs
    local name
    local docker_bin
    docker_bin=$(which docker)
    name="$1"
    shift
    args="$@"
    $docker_bin create --name $name $args
    container_runtime create --name $name $args
    run_process "$name" \
        "$docker_bin start --attach $name"
    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        run_process "$name" "$(which podman) start --attach $name" root root
    else
        run_process "$name" "$(which docker) start --attach $name"
    fi
}
# stop_container
@@ -151,8 +152,8 @@ function stop_container {
    local name
    name="$1"
    docker kill "$name"
    docker rm "$name"
    container_runtime kill "$name"
    container_runtime rm "$name"
    stop_process "$name"
}
@@ -366,12 +367,22 @@ function build_kuryr_containers() {
        cni_buildtool_args="${cni_buildtool_args} --no-daemon"
    fi
    if [[ "$CONTAINER_ENGINE" == "crio" ]]; then
        cni_buildtool_args="${cni_buildtool_args} --podman"
    fi
    # Build controller image
    sudo docker build \
        -t kuryr/controller -f "$controller_dockerfile" .
    # FIXME(dulek): Until https://github.com/containers/buildah/issues/1206 is
    #               resolved, instead of podman we need to use buildah directly,
    #               hence this awful if clause.
    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        sudo buildah bud -t docker.io/kuryr/controller -f "$controller_dockerfile" .
    else
        container_runtime build -t kuryr/controller -f "$controller_dockerfile" .
    fi
    # Build CNI image
    sudo "./tools/build_cni_daemonset_image" $cni_buildtool_args
    "./tools/build_cni_daemonset_image" $cni_buildtool_args
    popd
}
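
The buildah workaround presumably also explains the docker.io/ prefix: buildah keeps images under fully-qualified names, so tagging the image as docker.io/kuryr/controller should let CRI-O and podman resolve the unqualified kuryr/controller reference. A hypothetical post-build check:

sudo podman images docker.io/kuryr/controller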
@@ -1363,6 +1374,7 @@ function docker_install_ca_certs {
    local registry_hostnames
    local destdir
    # TODO(dulek): Support for CRI-O.
    registry_hostnames=(${1//,/ })
    for hostname in ${registry_hostnames[@]}; do
        destdir="/etc/docker/certs.d/${hostname}:5000"
@@ -1393,6 +1405,7 @@ function configure_and_run_registry {
    local registry_ip
    local hostnames
    # TODO(dulek): Support for CRI-O.
    service_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \


@@ -11,6 +11,14 @@
# License for the specific language governing permissions and limitations
# under the License.
function container_runtime {
    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        sudo podman "$@"
    else
        docker "$@"
    fi
}
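
# Illustrative dispatch of the wrapper above (not part of the change): the
# same invocation routes to podman via sudo under CRI-O, plain docker otherwise.
CONTAINER_ENGINE=crio container_runtime ps      # runs: sudo podman ps
CONTAINER_ENGINE=docker container_runtime ps    # runs: docker ps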
function create_kuryr_account {
    if is_service_enabled kuryr-kubernetes; then
        create_service_user "kuryr" "admin"
@@ -551,59 +559,71 @@ function run_k8s_api {
        cluster_ip_range="$service_cidr"
    fi
    run_container kubernetes-api \
        --net host \
        --restart on-failure \
        --volume="${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw" \
        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
        /hyperkube apiserver \
        --service-cluster-ip-range="${cluster_ip_range}" \
        --insecure-bind-address=0.0.0.0 \
        --insecure-port="${KURYR_K8S_API_PORT}" \
        --etcd-servers="http://${SERVICE_HOST}:${ETCD_PORT}" \
        --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota \
        --client-ca-file=/srv/kubernetes/ca.crt \
        --basic-auth-file=/srv/kubernetes/basic_auth.csv \
        --min-request-timeout=300 \
        --tls-cert-file=/srv/kubernetes/server.cert \
        --tls-private-key-file=/srv/kubernetes/server.key \
        --token-auth-file=/srv/kubernetes/known_tokens.csv \
        --allow-privileged=true \
        --v=2 \
        --logtostderr=true
    local command
    command=(--net=host
             --volume=${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw)
    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
        command+=(--restart=on-failure)
    fi
    command+=(${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}
              /hyperkube apiserver
              --service-cluster-ip-range=${cluster_ip_range}
              --insecure-bind-address=0.0.0.0
              --insecure-port=${KURYR_K8S_API_PORT}
              --etcd-servers=http://${SERVICE_HOST}:${ETCD_PORT}
              --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
              --client-ca-file=/srv/kubernetes/ca.crt
              --basic-auth-file=/srv/kubernetes/basic_auth.csv
              --min-request-timeout=300
              --tls-cert-file=/srv/kubernetes/server.cert
              --tls-private-key-file=/srv/kubernetes/server.key
              --token-auth-file=/srv/kubernetes/known_tokens.csv
              --allow-privileged=true
              --v=2
              --logtostderr=true)
    run_container kubernetes-api "${command[@]}"
}
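
# A minimal sketch of the array pattern used above (service name and flags
# illustrative, not part of the change): collecting arguments in an array
# lets engine-specific flags be appended conditionally, while the quoted
# "${command[@]}" expansion keeps each element a single word.
command=(--net=host --volume=/srv/kubernetes:/srv/kubernetes:rw)
if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
    command+=(--restart=on-failure)    # only docker containers get a restart policy
fi
run_container some-service "${command[@]}"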
function run_k8s_controller_manager {
    # Runs Hyperkube's Kubernetes controller manager
    wait_for "Kubernetes API Server" "$KURYR_K8S_API_URL"
    run_container kubernetes-controller-manager \
        --net host \
        --volume="${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw" \
        --restart on-failure \
        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
        /hyperkube controller-manager \
        --master="$KURYR_K8S_API_URL" \
        --service-account-private-key-file=/srv/kubernetes/server.key \
        --root-ca-file=/srv/kubernetes/ca.crt \
        --min-resync-period=3m \
        --v=2 \
        --logtostderr=true
    local command
    command=(--net=host
             --volume=${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw)
    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
        command+=(--restart=on-failure)
    fi
    command+=(${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}
              /hyperkube controller-manager
              --master=$KURYR_K8S_API_URL
              --service-account-private-key-file=/srv/kubernetes/server.key
              --root-ca-file=/srv/kubernetes/ca.crt
              --min-resync-period=3m
              --v=2
              --logtostderr=true)
    run_container kubernetes-controller-manager "${command[@]}"
}
function run_k8s_scheduler {
    # Runs Hyperkube's Kubernetes scheduler
    wait_for "Kubernetes API Server" "$KURYR_K8S_API_URL"
    run_container kubernetes-scheduler \
        --net host \
        --volume="${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw" \
        --restart on-failure \
        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
        /hyperkube scheduler \
        --master="$KURYR_K8S_API_URL" \
        --v=2 \
        --logtostderr=true
    local command
    command=(--net=host
             --volume=${KURYR_HYPERKUBE_DATA_DIR}:/srv/kubernetes:rw)
    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
        command+=(--restart=on-failure)
    fi
    command+=(${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}
              /hyperkube scheduler
              --master=$KURYR_K8S_API_URL
              --v=2
              --logtostderr=true)
    run_container kubernetes-scheduler "${command[@]}"
}
function prepare_kubeconfig {
@@ -622,16 +642,24 @@ function extract_hyperkube {
    tmp_loopback_cni_path="/tmp/loopback"
    tmp_nsenter_path="/tmp/nsenter"
    hyperkube_container=$(docker run -d \
    hyperkube_container=$(container_runtime run -d \
        --net host \
        "${KURYR_HYPERKUBE_IMAGE}:${KURYR_HYPERKUBE_VERSION}" \
        /bin/false)
    docker cp "${hyperkube_container}:/hyperkube" "$tmp_hyperkube_path"
    docker cp "${hyperkube_container}:/opt/cni/bin/loopback" \
        "$tmp_loopback_cni_path"
    docker cp "${hyperkube_container}:/usr/bin/nsenter" "$tmp_nsenter_path"
    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        mnt=`container_runtime mount "${hyperkube_container}"`
        sudo cp "${mnt}/hyperkube" "$tmp_hyperkube_path"
        sudo cp "${mnt}/opt/cni/bin/loopback" "$tmp_loopback_cni_path"
        sudo cp "${mnt}/usr/bin/nsenter" "$tmp_nsenter_path"
        container_runtime umount ${hyperkube_container}
    else
        container_runtime cp "${hyperkube_container}:/hyperkube" "$tmp_hyperkube_path"
        container_runtime cp "${hyperkube_container}:/opt/cni/bin/loopback" \
            "$tmp_loopback_cni_path"
        container_runtime cp "${hyperkube_container}:/usr/bin/nsenter" "$tmp_nsenter_path"
    fi
    docker rm --force "$hyperkube_container"
    container_runtime rm --force "$hyperkube_container"
    sudo install -o "$STACK_USER" -m 0555 -D "$tmp_hyperkube_path" \
        "$KURYR_HYPERKUBE_BINARY"
    sudo install -o "$STACK_USER" -m 0555 -D "$tmp_loopback_cni_path" \
@@ -663,16 +691,12 @@ function run_k8s_kubelet {
    # adding Python and all our CNI/binding dependencies.
    local command
    local minor_version
    local cgroup_driver
    cgroup_driver="$(docker info|awk '/Cgroup/ {print $NF}')"
    sudo mkdir -p "${KURYR_HYPERKUBE_DATA_DIR}/"{kubelet,kubelet.cert}
    command="$KURYR_HYPERKUBE_BINARY kubelet\
        --kubeconfig=${HOME}/.kube/config \
        --allow-privileged=true \
        --v=2 \
        --cgroup-driver=$cgroup_driver \
        --address=0.0.0.0 \
        --enable-server \
        --network-plugin=cni \
@@ -681,6 +705,22 @@
        --cert-dir=${KURYR_HYPERKUBE_DATA_DIR}/kubelet.cert \
        --root-dir=${KURYR_HYPERKUBE_DATA_DIR}/kubelet"
    if [[ ${CONTAINER_ENGINE} == 'docker' ]]; then
        command+=" --cgroup-driver $(docker info|awk '/Cgroup/ {print $NF}')"
    elif [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        local crio_conf
        crio_conf=/etc/crio/crio.conf
        command+=" --cgroup-driver=$(iniget ${crio_conf} crio.runtime cgroup_manager)"
        command+=" --container-runtime=remote --container-runtime-endpoint=unix:///var/run/crio/crio.sock --runtime-request-timeout=10m"
        # We need to reconfigure CRI-O in this case as well.
        # FIXME(dulek): This should probably go to devstack-plugin-container
        iniset -sudo ${crio_conf} crio.network network_dir \"${CNI_CONF_DIR}\"
        iniset -sudo ${crio_conf} crio.network plugin_dir \"${CNI_BIN_DIR}\"
        sudo systemctl --no-block restart crio.service
    fi
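    # For reference (an assumption based on the usual DevStack defaults for
    # CNI_CONF_DIR and CNI_BIN_DIR), the two iniset calls above should leave
    # /etc/crio/crio.conf with:
    #
    #     [crio.network]
    #     network_dir = "/etc/cni/net.d"
    #     plugin_dir = "/opt/cni/bin"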
    declare -r min_not_require_kubeconfig_ver="1.10.0"
    if [[ "$KURYR_HYPERKUBE_VERSION" == "$(echo -e "${KURYR_HYPERKUBE_VERSION}\n${min_not_require_kubeconfig_ver}" | sort -V | head -n 1)" ]]; then
        # Version 1.10 did away with that config option


@@ -0,0 +1,5 @@
---
features:
  - |
    Added support for using cri-o (and podman & buildah) as the container
    engine in both container images and DevStack.


@@ -13,6 +13,7 @@ function print_usage() {
    echo "-c/--conf-dir Specify the path where to place the CNI configuration"
    echo "-t/--tag Specify string to use as the tag part of the container image name, i.e., kuryr/cni:tag"
    echo "-D/--no-daemon Do not run CNI as a daemon"
    echo "-p/--podman Use podman instead of docker to build image"
}
for arg in "$@"; do
@@ -24,6 +25,7 @@ for arg in "$@"; do
        "--dockerfile") set -- "$@" "-f" ;;
        "--tag") set -- "$@" "-t" ;;
        "--no-daemon") set -- "$@" "-D" ;;
        "--podman") set -- "$@" "-p" ;;
        "--"*) print_usage "$arg" >&2; exit 1 ;;
        *) set -- "$@" "$arg"
    esac
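
The loop above rewrites long options into their short equivalents so that the getopts builtin, which only parses short options, can handle both forms. A self-contained sketch of the same trick (option names hypothetical):

for arg in "$@"; do
    shift
    case "$arg" in
        "--verbose") set -- "$@" "-v" ;;
        *) set -- "$@" "$arg" ;;
    esac
done
while getopts "v" opt; do
    case "$opt" in
        "v") echo "verbose enabled" ;;
    esac
done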
@@ -34,16 +36,20 @@ dockerfile="cni.Dockerfile"
image_name="kuryr/cni"
daemonized="True"
build_args=()
build_cmd="docker build"
OPTIND=1
while getopts "hf:b:c:t:D" opt; do
while getopts "hf:b:c:t:Dp" opt; do
    case "$opt" in
        "h") print_usage; exit 0 ;;
        "D") daemonized=False ;;
        "f") dockerfile=${OPTARG} ;;
        "b") build_args+=('--build-arg' "CNI_BIN_DIR_PATH=${OPTARG}") ;;
        "c") build_args+=('--build-arg' "CNI_CONFIG_DIR_PATH=${OPTARG}") ;;
        # Until https://github.com/containers/buildah/issues/1206 is resolved
        # we need to use buildah directly.
        "p") build_cmd="sudo buildah bud" && image_name="docker.io/kuryr/cni" ;;
        "t") image_name=${image_name}:${OPTARG} ;;
        "?") print_usage >&2; exit 1 ;;
    esac
@@ -52,7 +58,7 @@ done
shift $((OPTIND - 1))
# create cni daemonset image
docker build -t "$image_name" \
${build_cmd} -t "$image_name" \
    --build-arg "CNI_DAEMON=$daemonized" \
    "${build_args[@]}" \
    -f "$dockerfile" .
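
Hypothetical invocations of the updated tool, for illustration:

# DevStack's CRI-O path: build with buildah under a fully-qualified name
./tools/build_cni_daemonset_image --podman
# Plain docker build with a custom tag and the CNI daemon disabled
./tools/build_cni_daemonset_image --tag stable --no-daemon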