Merge "Add container engine option to scripts"

This commit is contained in:
Zuul 2023-05-19 11:59:56 +00:00 committed by Gerrit Code Review
commit 62be98f90c
27 changed files with 237 additions and 99 deletions

View File

@ -1,3 +1,3 @@
---
- name: Destroying all Kolla containers and volumes
script: ../tools/cleanup-containers
script: ../tools/cleanup-containers "{{ kolla_container_engine }}"

View File

@ -1,5 +1,5 @@
---
- name: Removing Kolla images
script: ../tools/cleanup-images --all
script: ../tools/cleanup-images --all -e "{{ kolla_container_engine }}"
when:
- destroy_include_images | bool

View File

@ -1,3 +1,5 @@
---
- name: Ensure the docker service is running
environment:
CONTAINER_ENGINE: "{{ kolla_container_engine }}"
script: ../tools/validate-docker-execute.sh

View File

@ -1,3 +1,5 @@
---
- name: Ensure the docker service is running
environment:
CONTAINER_ENGINE: "{{ kolla_container_engine }}"
script: ../tools/validate-docker-execute.sh

View File

@ -7,7 +7,22 @@ set -o errexit
export PYTHONUNBUFFERED=1
check_failure() {
check_podman_failures() {
# Populate the global $failed_containers with the names of kolla containers
# that podman reports in a non-running state. NB: "{{.Names}}" is a Go
# template consumed by podman itself, not a shell expansion.
failed_containers=$(sudo podman ps -a --format "{{.Names}}" \
--filter status=created \
--filter status=paused \
--filter status=exited \
--filter status=unknown)
}
check_podman_unhealthies() {
# Populate the global $unhealthy_containers with podman containers whose
# health check currently reports "unhealthy".
unhealthy_containers=$(sudo podman ps -a --format "{{.Names}}" \
--filter health=unhealthy)
}
check_docker_failures() {
# All docker container's status are created, restarting, running, removing,
# paused, exited and dead. Containers without running status are treated as
# failure. removing is added in docker 1.13, just ignore it now.
@ -19,9 +34,26 @@ check_failure() {
--filter status=paused \
--filter status=exited \
--filter status=dead)
}
check_docker_unhealthies() {
# Populate the global $unhealthy_containers with docker containers whose
# health check currently reports "unhealthy".
unhealthy_containers=$(sudo docker ps -a --format "{{.Names}}" \
--filter health=unhealthy)
}
check_failure() {
if [ "$CONTAINER_ENGINE" = "docker" ]; then
check_docker_failures
check_docker_unhealthies
elif [ "$CONTAINER_ENGINE" = "podman" ]; then
check_podman_failures
check_podman_unhealthies
else
echo "Invalid container engine: ${CONTAINER_ENGINE}"
exit 1
fi
if [[ -n "$unhealthy_containers" ]]; then
exit 1;

View File

@ -24,7 +24,7 @@ function deploy_tenks_logged {
# Script installed onto the host to fool tenks into using the containerised
# Open vSwitch rather than installing its own.
sudo docker exec openvswitch_vswitchd ovs-vsctl "\$@"
sudo ${CONTAINER_ENGINE} exec openvswitch_vswitchd ovs-vsctl "\$@"
EOF
sudo chmod 755 /usr/bin/ovs-vsctl
@ -57,7 +57,7 @@ EOF
function deploy_tenks {
echo "Configuring virtual bare metal via Tenks"
deploy_tenks_logged > /tmp/logs/ansible/deploy-tenks 2>&1
deploy_tenks_logged $1 > /tmp/logs/ansible/deploy-tenks 2>&1
result=$?
if [[ $result != 0 ]]; then
echo "Deploying tenks failed. See ansible/deploy-tenks for details"
@ -67,4 +67,4 @@ function deploy_tenks {
return $result
}
deploy_tenks
deploy_tenks $1

View File

@ -5,12 +5,25 @@ set +o errexit
copy_logs() {
LOG_DIR=${LOG_DIR:-/tmp/logs}
cp -rnL /var/lib/docker/volumes/kolla_logs/_data/* ${LOG_DIR}/kolla/
if [ "$CONTAINER_ENGINE" = "docker" ]; then
VOLUMES_DIR="/var/lib/docker/volumes"
elif [ "$CONTAINER_ENGINE" = "podman" ]; then
VOLUMES_DIR="/var/lib/containers/storage/volumes"
else
echo "Invalid container engine: ${CONTAINER_ENGINE}"
exit 1
fi
cp -rnL ${VOLUMES_DIR}/kolla_logs/_data/* ${LOG_DIR}/kolla/
cp -rnL /etc/kolla/* ${LOG_DIR}/kolla_configs/
# Don't save the IPA images.
rm ${LOG_DIR}/kolla_configs/config/ironic/ironic-agent.{kernel,initramfs}
mkdir ${LOG_DIR}/system_configs/
cp -rL /etc/{hostname,hosts,host.conf,resolv.conf,nsswitch.conf,docker,systemd} ${LOG_DIR}/system_configs/
cp -rL /etc/{hostname,hosts,host.conf,resolv.conf,nsswitch.conf,systemd} ${LOG_DIR}/system_configs/
# copy docker configs if used
if [ "$CONTAINER_ENGINE" = "docker" ]; then
cp -rL /etc/docker/ ${LOG_DIR}/system_configs/
fi
# Remove /var/log/kolla link to not double the data uploaded
unlink /var/log/kolla
cp -rvnL /var/log/* ${LOG_DIR}/system_logs/
@ -18,10 +31,14 @@ copy_logs() {
if [[ -x "$(command -v journalctl)" ]]; then
journalctl --no-pager > ${LOG_DIR}/system_logs/syslog.txt
journalctl --no-pager -u docker.service > ${LOG_DIR}/system_logs/docker.log
journalctl --no-pager -u containerd.service > ${LOG_DIR}/system_logs/containerd.log
journalctl --no-pager -u ${CONTAINER_ENGINE}.service > ${LOG_DIR}/system_logs/${CONTAINER_ENGINE}.log
if [ "$CONTAINER_ENGINE" = "docker" ]; then
journalctl --no-pager -u containerd.service > ${LOG_DIR}/system_logs/containerd.log
fi
else
cp /var/log/upstart/docker.log ${LOG_DIR}/system_logs/docker.log
if [ "$CONTAINER_ENGINE" = "docker" ]; then
cp /var/log/upstart/docker.log ${LOG_DIR}/system_logs/docker.log
fi
fi
cp -r /etc/sudoers.d ${LOG_DIR}/system_logs/
@ -81,8 +98,12 @@ copy_logs() {
# final memory usage and process list
ps -eo user,pid,ppid,lwp,%cpu,%mem,size,rss,cmd > ${LOG_DIR}/system_logs/ps.txt
# docker related information
(docker info && docker images && docker ps -a && docker network ls && docker inspect $(docker ps -aq)) > ${LOG_DIR}/system_logs/docker-info.txt
# container engine related information
(${CONTAINER_ENGINE} info &&
${CONTAINER_ENGINE} images &&
${CONTAINER_ENGINE} ps -a &&
${CONTAINER_ENGINE} network ls &&
${CONTAINER_ENGINE} inspect $(${CONTAINER_ENGINE} ps -aq)) > ${LOG_DIR}/system_logs/${CONTAINER_ENGINE}-info.txt
# save dbus services
dbus-send --system --print-reply --dest=org.freedesktop.DBus /org/freedesktop/DBus org.freedesktop.DBus.ListNames > ${LOG_DIR}/system_logs/dbus-services.txt
@ -98,28 +119,28 @@ copy_logs() {
fi
# bifrost related logs
if [[ $(docker ps --filter name=bifrost_deploy --format "{{.Names}}") ]]; then
if [[ $(${CONTAINER_ENGINE} ps --filter name=bifrost_deploy --format "{{.Names}}") ]]; then
for service in dnsmasq ironic ironic-api ironic-conductor ironic-inspector mariadb nginx; do
mkdir -p ${LOG_DIR}/kolla/$service
docker exec bifrost_deploy systemctl status $service > ${LOG_DIR}/kolla/$service/systemd-status-$service.txt
${CONTAINER_ENGINE} exec bifrost_deploy systemctl status $service > ${LOG_DIR}/kolla/$service/systemd-status-$service.txt
done
docker exec bifrost_deploy journalctl -u mariadb > ${LOG_DIR}/kolla/mariadb/mariadb.txt
${CONTAINER_ENGINE} exec bifrost_deploy journalctl -u mariadb > ${LOG_DIR}/kolla/mariadb/mariadb.txt
fi
# haproxy related logs
if [[ $(docker ps --filter name=haproxy --format "{{.Names}}") ]]; then
if [[ $(${CONTAINER_ENGINE} ps --filter name=haproxy --format "{{.Names}}") ]]; then
mkdir -p ${LOG_DIR}/kolla/haproxy
docker exec haproxy bash -c 'echo show stat | socat stdio /var/lib/kolla/haproxy/haproxy.sock' > ${LOG_DIR}/kolla/haproxy/stats.txt
${CONTAINER_ENGINE} exec haproxy bash -c 'echo show stat | socat stdio /var/lib/kolla/haproxy/haproxy.sock' > ${LOG_DIR}/kolla/haproxy/stats.txt
fi
# FIXME: remove
if [[ $(docker ps -a --filter name=ironic_inspector --format "{{.Names}}") ]]; then
if [[ $(${CONTAINER_ENGINE} ps -a --filter name=ironic_inspector --format "{{.Names}}") ]]; then
mkdir -p ${LOG_DIR}/kolla/ironic-inspector
ls -lR /var/lib/docker/volumes/ironic_inspector_dhcp_hosts > ${LOG_DIR}/kolla/ironic-inspector/var-lib-ls.txt
ls -lR ${VOLUMES_DIR}/ironic_inspector_dhcp_hosts > ${LOG_DIR}/kolla/ironic-inspector/var-lib-ls.txt
fi
for container in $(docker ps -a --format "{{.Names}}"); do
docker logs --timestamps --tail all ${container} &> ${LOG_DIR}/docker_logs/${container}.txt
for container in $(${CONTAINER_ENGINE} ps -a --format "{{.Names}}"); do
${CONTAINER_ENGINE} logs --timestamps --tail all ${container} &> ${LOG_DIR}/container_logs/${container}.txt
done
# Rename files to .txt; this is so that when displayed via
@ -128,7 +149,7 @@ copy_logs() {
# download it, etc.
# Rename all .log files to .txt files
for f in $(find ${LOG_DIR}/{system_logs,kolla,docker_logs} -name "*.log"); do
for f in $(find ${LOG_DIR}/{system_logs,kolla,${CONTAINER_ENGINE}_logs} -name "*.log"); do
mv $f ${f/.log/.txt}
done

View File

@ -10,7 +10,7 @@ function init_swift_logged {
# the order is important due to port incrementation
for ring in object account container; do
# create the *.builder files
sudo docker run \
sudo ${CONTAINER_ENGINE} run \
--rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \
@ -19,7 +19,7 @@ function init_swift_logged {
# add nodes to them
for node in ${STORAGE_NODES[@]}; do
sudo docker run \
sudo ${CONTAINER_ENGINE} run \
--rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \
@ -28,7 +28,7 @@ function init_swift_logged {
done
# create the *.ring.gz files
sudo docker run \
sudo ${CONTAINER_ENGINE} run \
--rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \
@ -36,7 +36,7 @@ function init_swift_logged {
/etc/kolla/config/swift/$ring.builder rebalance
# display contents for debugging
sudo docker run \
sudo ${CONTAINER_ENGINE} run \
--rm \
-v /etc/kolla/config/swift/:/etc/kolla/config/swift/ \
$KOLLA_SWIFT_BASE_IMAGE \

View File

@ -69,6 +69,7 @@
environment:
LOG_DIR: "{{ logs_dir }}/pre"
KOLLA_INTERNAL_VIP_ADDRESS: "{{ kolla_internal_vip_address }}"
CONTAINER_ENGINE: "{{ container_engine }}"
script: get_logs.sh
register: get_logs_result
become: true
@ -84,7 +85,7 @@
state: "directory"
mode: 0777
with_items:
- "docker_logs"
- "container_logs"
- "kolla_configs"
- "system_logs"
- "kolla"

View File

@ -132,6 +132,7 @@
path: "/etc/docker"
state: directory
become: true
when: container_engine == 'docker'
- name: Ensure configuration directories exist
file:
@ -327,6 +328,7 @@
SCENARIO: "{{ scenario }}"
UPPER_CONSTRAINTS: "{{ upper_constraints_file }}"
KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run init-swift.sh script
script:
@ -339,6 +341,7 @@
STORAGE_NODES: "{{ groups['all'] | map('extract', hostvars,
['ansible_'+api_interface_name, 'ipv4', 'address'])
| join(' ') }}"
CONTAINER_ENGINE: "{{ container_engine }}"
when: scenario == 'swift'
# At this point we have generated all necessary configuration, and are
@ -445,6 +448,7 @@
when: scenario == 'zun'
environment:
BASE_DISTRO: "{{ base_distro }}"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-swift.sh script
script:
@ -472,6 +476,7 @@
# by default to ~/tenks-venv
TENKS_VENV_PATH: "{{ ansible_env.HOME }}/kolla-ansible-tenks-venv"
TENKS_SRC_PATH: "{{ ansible_env.HOME }}/src/opendev.org/openstack/tenks"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-ironic.sh script
script:
@ -504,6 +509,8 @@
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "masakari"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-ovn.sh script
script:
@ -511,6 +518,8 @@
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "ovn"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-mariadb.sh script
script:
@ -520,6 +529,7 @@
when: scenario == "mariadb"
environment:
KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run test-prometheus-opensearch.sh script
script:
@ -551,6 +561,8 @@
cmd: tests/check-failure.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run pre-upgrade check-config.sh script
shell:
@ -741,6 +753,8 @@
cmd: tests/test-bifrost.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run upgrade-bifrost.sh script
shell:
@ -762,6 +776,8 @@
cmd: tests/check-failure.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run check-config.sh script
shell:
@ -800,6 +816,8 @@
cmd: tests/check-failure.sh
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
environment:
CONTAINER_ENGINE: "{{ container_engine }}"
- name: Run check-config.sh script
shell:

View File

@ -109,7 +109,7 @@ EOF
mkdir -p /tmp/logs/build
sudo docker run -d --net=host -e REGISTRY_HTTP_ADDR=0.0.0.0:4000 --restart=always -v /opt/kolla_registry/:/var/lib/registry --name registry registry:2
sudo $CONTAINER_ENGINE run -d --net=host -e REGISTRY_HTTP_ADDR=0.0.0.0:4000 --restart=always -v /opt/kolla_registry/:/var/lib/registry --name registry registry:2
python3 -m venv ~/kolla-venv
. ~/kolla-venv/bin/activate
@ -121,8 +121,8 @@ EOF
# NOTE(yoctozepto): due to debian buster we push after images are built
# see https://github.com/docker/for-linux/issues/711
if [[ "debian" == $BASE_DISTRO ]]; then
for img in $(sudo docker image ls --format '{{ .Repository }}:{{ .Tag }}' | grep lokolla/); do
sudo docker push $img;
for img in $(sudo ${CONTAINER_ENGINE} image ls --format '{{ .Repository }}:{{ .Tag }}' | grep lokolla/); do
sudo $CONTAINER_ENGINE push $img;
done
fi

View File

@ -13,6 +13,7 @@ kolla_base_distro: "{{ base_distro }}"
# Zed dropped install_type so we have it only on upgrades
network_interface: "{{ api_interface_name }}"
network_address_family: "{{ address_family }}"
kolla_container_engine: "{{ container_engine }}"
docker_restart_policy: "no"
docker_custom_config:
debug: true

View File

@ -8,11 +8,13 @@ export PYTHONUNBUFFERED=1
function test_bifrost {
container_engine="${1:-docker}"
# TODO(mgoddard): More testing, deploy bare metal nodes.
# TODO(mgoddard): Use openstackclient when clouds.yaml works. See
# https://bugs.launchpad.net/bifrost/+bug/1754070.
attempts=0
while [[ $(sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal driver list -f value" | wc -l) -eq 0 ]]; do
while [[ $(sudo ${container_engine} exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal driver list -f value" | wc -l) -eq 0 ]]; do
attempts=$((attempts + 1))
if [[ $attempts -gt 6 ]]; then
echo "Timed out waiting for ironic conductor to become active"
@ -20,10 +22,10 @@ function test_bifrost {
fi
sleep 10
done
sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node list"
sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node create --driver ipmi --name test-node"
sudo docker exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node delete test-node"
sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node list"
sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node create --driver ipmi --name test-node"
sudo $container_engine exec bifrost_deploy bash -c "OS_CLOUD=bifrost openstack baremetal node delete test-node"
}
test_bifrost
test_bifrost $1

View File

@ -12,7 +12,7 @@ export PYTHONUNBUFFERED=1
function mariadb_stop {
echo "Stopping the database cluster"
kolla-ansible -i ${RAW_INVENTORY} -vvv stop --yes-i-really-really-mean-it --tags mariadb --skip-tags common
if [[ $(sudo docker ps -q | grep mariadb | wc -l) -ne 0 ]]; then
if [[ $(sudo ${container_engine} ps -q | grep mariadb | wc -l) -ne 0 ]]; then
echo "Failed to stop MariaDB cluster"
return 1
fi
@ -48,4 +48,5 @@ function test_mariadb {
return $result
}
container_engine="${1:-docker}"
test_mariadb

View File

@ -8,6 +8,7 @@ set -o pipefail
export PYTHONUNBUFFERED=1
function test_hacluster_logged {
container_engine="${1:-docker}"
local cluster_failure
cluster_failure=0
@ -17,17 +18,17 @@ function test_hacluster_logged {
# var setting from the container which would cause these commands to log up
# to 'trace' (likely a pacemaker bug)
if ! sudo docker exec hacluster_pacemaker cibadmin -VVVVVV --query --local; then
if ! sudo ${container_engine} exec hacluster_pacemaker cibadmin -VVVVVV --query --local; then
cluster_failure=1
fi
local mon_output
if ! mon_output=$(sudo docker exec -e PCMK_debug=no hacluster_pacemaker crm_mon -VVVVV --one-shot); then
if ! mon_output=$(sudo ${container_engine} exec -e PCMK_debug=no hacluster_pacemaker crm_mon -VVVVV --one-shot); then
cluster_failure=1
fi
if ! sudo docker exec -e PCMK_debug=no hacluster_pacemaker crm_verify -VVVVV --live-check; then
if ! sudo ${container_engine} exec -e PCMK_debug=no hacluster_pacemaker crm_verify -VVVVV --live-check; then
cluster_failure=1
fi
@ -81,7 +82,7 @@ function test_masakari_logged {
function test_masakari {
echo "Testing Masakari"
test_hacluster_logged > /tmp/logs/ansible/test-hacluster 2>&1
test_hacluster_logged $1 > /tmp/logs/ansible/test-hacluster 2>&1
test_masakari_logged > /tmp/logs/ansible/test-masakari 2>&1
result=$?
if [[ $result != 0 ]]; then
@ -92,4 +93,4 @@ function test_masakari {
return $result
}
test_masakari
test_masakari $1

View File

@ -15,18 +15,18 @@ function test_ovn {
# List OVN NB/SB entries
echo "OVN NB DB entries:"
sudo docker exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show
sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" show
echo "OVN SB DB entries:"
sudo docker exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show
sudo ${container_engine} exec ovn_northd ovn-sbctl --db "$ovn_sb_connection" show
# Test OVSDB cluster state
if [[ $BASE_DISTRO =~ ^(debian|ubuntu)$ ]]; then
OVNNB_STATUS=$(sudo docker exec ovn_nb_db ovs-appctl -t /var/run/openvswitch/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo docker exec ovn_sb_db ovs-appctl -t /var/run/openvswitch/ovnsb_db.ctl cluster/status OVN_Southbound)
OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/openvswitch/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/openvswitch/ovnsb_db.ctl cluster/status OVN_Southbound)
else
OVNNB_STATUS=$(sudo docker exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo docker exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound)
OVNNB_STATUS=$(sudo ${container_engine} exec ovn_nb_db ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound)
OVNSB_STATUS=$(sudo ${container_engine} exec ovn_sb_db ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound)
fi
if [[ $(grep -o "at tcp:" <<< ${OVNNB_STATUS} | wc -l) != "3" ]]; then
@ -84,9 +84,9 @@ function test_octavia {
openstack floating ip set $lb_fip --port $lb_port_id
echo "OVN NB entries for LB:"
sudo docker exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer
sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list load_balancer
echo "OVN NB entries for NAT:"
sudo docker exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat
sudo ${container_engine} exec ovn_northd ovn-nbctl --db "$ovn_nb_connection" list nat
echo "Attempt to access the load balanced HTTP server."
attempts=12
@ -133,5 +133,5 @@ function test_ovn_setup {
}
container_engine=${1:-docker}
test_ovn_setup

View File

@ -9,12 +9,14 @@ function test_zun_logged {
. /etc/kolla/admin-openrc.sh
. ~/openstackclient-venv/bin/activate
container_engine="${1:-docker}"
echo "TESTING: Zun"
openstack appcontainer service list
openstack appcontainer host list
openstack subnet set --no-dhcp demo-subnet
sudo docker pull alpine
sudo docker save alpine | openstack image create alpine --public --container-format docker --disk-format raw
sudo ${container_engine} pull alpine
sudo ${container_engine} save alpine | openstack image create alpine --public --container-format docker --disk-format raw
openstack appcontainer run --net network=demo-net --name test alpine sleep 1000
attempt=1
while [[ $(openstack appcontainer show test -f value -c status) != "Running" ]]; do
@ -160,7 +162,7 @@ function test_zun {
if [[ -f $log_file ]]; then
log_file=${log_file}-upgrade
fi
test_zun_logged > $log_file 2>&1
test_zun_logged $1 > $log_file 2>&1
result=$?
if [[ $result != 0 ]]; then
echo "Testing Zun failed. See ansible/test-zun for details"
@ -170,4 +172,4 @@ function test_zun {
return $result
}
test_zun
test_zun $1

View File

@ -1,32 +1,43 @@
#!/usr/bin/env bash
containers_running=$(sudo docker ps --filter "label=kolla_version" --format "{{.Names}}")
# default to docker if not specified
engine="${1:-docker}"
shift 1
if ! [[ "$engine" =~ ^(docker|podman)$ ]]; then
echo "Invalid container engine: ${engine}"
exit 1
fi
echo "Using container engine: $engine"
containers_running=$(sudo $engine ps --filter "label=kolla_version" --format "{{.Names}}")
QEMU_PIDS=$(pgrep -l qemu | awk '!/qemu-ga/ && !/qemu-img/ {print $1}')
if [[ "${containers_running}" =~ "nova_libvirt" ]] && [[ $QEMU_PIDS ]] && [[ $(ps --no-headers wwwup $QEMU_PIDS | grep --invert-match '\-xen\-domid 0') ]]; then
echo "Some qemu processes were detected."
echo "Docker will not be able to stop the nova_libvirt container with those running."
echo "Container engine ($engine) will not be able to stop the nova_libvirt container with those running."
echo "Please clean them up before rerunning this script."
exit 1
fi
fi
if [ -n "$1" ]; then
containers_to_kill=$(sudo docker ps --filter "label=kolla_version" --format "{{.Names}}" -a | grep -E "$1" | awk '{print $1}')
volumes_to_remove=$(sudo docker inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \
containers_to_kill=$(sudo $engine ps --filter "label=kolla_version" --format "{{.Names}}" -a | grep -E "$1" | awk '{print $1}')
volumes_to_remove=$(sudo $engine inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \
egrep -v '(^\s*$)' | sort | uniq)
else
containers_to_kill=$(sudo docker ps --filter "label=kolla_version" --format "{{.Names}}" -a)
containers_to_kill=$(sudo $engine ps --filter "label=kolla_version" --format "{{.Names}}" -a)
volumes_to_remove=$(sudo docker inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \
volumes_to_remove=$(sudo $engine inspect -f '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' ${containers_to_kill} | \
egrep -v '(^\s*$)' | sort | uniq)
fi
if [[ "${containers_to_kill}" =~ "openvswitch_vswitchd" ]] && [[ "${containers_running}" =~ "neutron_openvswitch_agent" ]]; then
echo "Removing ovs bridge..."
(sudo docker exec -u root neutron_openvswitch_agent neutron-ovs-cleanup \
(sudo $engine exec -u root neutron_openvswitch_agent neutron-ovs-cleanup \
--config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini \
--ovs_all_ports) > /dev/null
(sudo docker exec -it openvswitch_vswitchd bash -c 'for br in `ovs-vsctl list-br`;do ovs-vsctl --if-exists del-br $br;done') > /dev/null
(sudo $engine exec -it openvswitch_vswitchd bash -c 'for br in `ovs-vsctl list-br`;do ovs-vsctl --if-exists del-br $br;done') > /dev/null
fi
echo "Stopping containers..."
@ -35,15 +46,15 @@ sudo systemctl stop kolla-${container}-container.service
done
echo "Removing containers..."
(sudo docker rm -v -f ${containers_to_kill} 2>&1) > /dev/null
(sudo $engine rm -v -f ${containers_to_kill} 2>&1) > /dev/null
echo "Disconnecting containers from docker host network"
echo "Disconnecting containers from $engine host network"
for container in ${containers_to_kill}; do
(sudo docker network disconnect -f host $container 2>&1) > /dev/null
(sudo $engine network disconnect -f host $container 2>&1) > /dev/null
done
echo "Removing volumes..."
(sudo docker volume rm ${volumes_to_remove} 2>&1) > /dev/null
(sudo $engine volume rm ${volumes_to_remove} 2>&1) > /dev/null
echo "Removing link of kolla_log volume..."
(sudo rm -f /var/log/kolla 2>&1) > /dev/null

View File

@ -1,7 +1,9 @@
#!/usr/bin/env bash
container_engine="docker"
# Move to top level directory
REAL_PATH=$(python -c "import os;print os.path.realpath('$0')")
REAL_PATH=$(python3 -c "import os;print(os.path.realpath('$0'))")
cd "$(dirname "$REAL_PATH")/.."
function process_cmd {
@ -28,34 +30,50 @@ Options:
--help, -h Show this usage information
--image, -i <image> Delete selected images
--image-version <image_version> Set Kolla image version
--engine, -e <container_engine> Container engine to be used
EOF
}
SHORT_OPTS="ahi:"
LONG_OPTS="all,dangling,help,image:,image-version:"
SHORT_OPTS="ahi:e:"
LONG_OPTS="all,dangling,help,image:,image-version:,engine:"
ARGS=$(getopt -o "${SHORT_OPTS}" -l "${LONG_OPTS}" --name "$0" -- "$@") || { usage >&2; exit 2; }
eval set -- "$ARGS"
for arg do
shift
if [ "$arg" = "-e" ] || [ "$arg" = "--engine" ]; then
container_engine="$1"
continue
elif [ "$arg" = "$container_engine" ]; then
continue
fi
eval set -- "$@" "$arg"
done
# catch empty arguments
if [ "$ARGS" = " --" ]; then
eval set -- "$ARGS"
fi
case "$1" in
(--all|-a)
KOLLA_IMAGES="$(sudo docker images -a --filter "label=kolla_version" --format "{{.ID}}")"
KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter "label=kolla_version" --format "{{.ID}}")"
shift
;;
(--dangling)
KOLLA_IMAGES="$(sudo docker images -a --filter dangling=true --format "{{.ID}}")"
KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter dangling=true --format "{{.ID}}")"
shift
;;
(--image|-i)
KOLLA_IMAGES="$(sudo docker images -a --filter "label=kolla_version" --format "{{.Repository}}\t{{.ID}}" | grep -E "$2" | awk '{print $2}')"
KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter "label=kolla_version" --format "{{.Repository}}\t{{.ID}}" | grep -E "$2" | awk '{print $2}')"
shift 2
;;
(--image-version)
KOLLA_IMAGES="$(sudo docker images -a --filter "label=kolla_version=${2}" --format "{{.ID}}")"
KOLLA_IMAGES="$(sudo ${container_engine} images -a --filter "label=kolla_version=${2}" --format "{{.ID}}")"
shift 2
;;
@ -73,5 +91,5 @@ case "$1" in
esac
CMD="sudo docker rmi -f $@ $KOLLA_IMAGES"
CMD="sudo ${container_engine} rmi -f $@ -- $KOLLA_IMAGES"
process_cmd

View File

@ -1,13 +1,15 @@
#!/usr/bin/env bash
container_engine="${1:-docker}"
echo "##### System Identification #####"
egrep -w 'PRETTY_NAME|VERSION_ID' /etc/os-release
echo "##### Docker Version #####"
docker --version
echo "##### ${container_engine^} Version #####"
$container_engine --version
echo "##### Docker Info #####"
docker info
echo "##### ${container_engine^} Info #####"
$container_engine info
echo "##### Ansible Version #####"
ansible --version
@ -19,11 +21,11 @@ echo "##### Globals.yml file #####"
egrep -w 'kolla_base_distro|openstack_release' /etc/kolla/globals.yml
cat /etc/kolla/globals.yml | grep ^enable_
echo "##### Docker Images #####"
docker images -a --filter "label=kolla_version" --filter "dangling=false" --format "{{.ID}} - {{.Repository}}:{{.Tag}} - {{.CreatedSince}}"
echo "##### ${container_engine^} Images #####"
$container_engine images -a --filter "label=kolla_version" --filter "dangling=false" --format "{{.ID}} - {{.Repository}}:{{.Tag}} - {{.CreatedSince}}"
echo "##### All Docker Containers #####"
docker ps -a
echo "##### All ${container_engine^} Containers #####"
$container_engine ps -a
echo "##### Ip Link Show #####"
ip link show

View File

@ -0,0 +1,17 @@
[Unit]
# Shut down libvirt guests cleanly before the kolla nova_libvirt container
# stops (podman variant of the unit).
Description=Manage libvirt guests in kolla safely
After=systemd-machined.service
After=virt-guest-shutdown.target
Requires=virt-guest-shutdown.target
[Install]
WantedBy=multi-user.target
[Service]
Type=oneshot
RemainAfterExit=yes
# Give guest shutdown up to 610s before systemd force-kills the stop job.
TimeoutStopSec=610
# Leading '-' tells systemd to ignore a non-zero exit status from the command
# (e.g. when the containers are not running yet).
ExecStart=-/usr/bin/podman exec nova_libvirt /bin/rm -f /var/lib/libvirt/libvirt-guests
ExecStart=-/usr/bin/podman start nova_compute
ExecStop=/usr/bin/podman stop nova_compute
ExecStop=/usr/bin/podman exec nova_libvirt /bin/sh -x /usr/libexec/libvirt-guests.sh shutdown

View File

@ -2,6 +2,7 @@
ANSIBLE=`find ansible -type f -exec cat {} \; | wc -l`
DOCKER=`find docker -type f -exec cat {} \; | wc -l`
PODMAN=`find podman -type f -exec cat {} \; | wc -l`
DOC=`find doc -type f -exec cat {} \; | wc -l`
TESTS=`find tests -type f -exec cat {} \; | wc -l`
BUILD=`find kolla -type f -exec cat {} \; | wc -l`
@ -12,7 +13,7 @@ ETC=`find etc -type f -exec cat {} \; | wc -l`
TOOLS=`find tools -type f -exec cat {} \; | wc -l`
VAGRANT=`find contrib/dev/vagrant -type f -exec cat {} \; | wc -l`
CORE=$(($ANSIBLE+$DOCKER+$TESTS+$DOCS+$BUILD))
CORE=$(($ANSIBLE+$DOCKER+$PODMAN+$TESTS+$DOCS+$BUILD))
SUPP=$(($DEMOS+$HEAT+$SPECS+$ETC+$TOOLS+$VAGRANT))
TOTAL=$(($CORE+$SUPP))
@ -23,6 +24,7 @@ echo "Demos $DEMOS"
echo "Doc $DOC"
echo "Etc $ETC"
echo "Docker $DOCKER"
echo "Podman $PODMAN"
echo "Specs $SPECS"
echo "Tests $TESTS"
echo "Tools $TOOLS"

View File

@ -7,7 +7,7 @@
become: true
vars:
systemd_artifacts:
- kolla-libvirt-guests.service
- "kolla-libvirt-guests-{{ container_engine }}.service"
- virt-guest-shutdown.target
tasks:
- name: Install systemd artifacts

View File

@ -154,7 +154,7 @@ def check_docker_become():
for x in YAML_INCLUDE_PATTERNS])
excludes = r'|'.join([fnmatch.translate(x)
for x in YAML_EXCLUDE_PATTERNS])
docker_modules = ('kolla_docker', 'kolla_container_facts', 'kolla_toolbox')
ce_modules = ('kolla_docker', 'kolla_container_facts', 'kolla_toolbox')
cmd_modules = ('command', 'shell')
return_code = 0
roles_path = os.path.join(PROJECT_ROOT, 'ansible', 'roles')
@ -168,24 +168,27 @@ def check_docker_become():
tasks = yaml.safe_load(fp)
tasks = tasks or []
for task in tasks:
for module in docker_modules:
for module in ce_modules:
if module in task and not task.get('become'):
return_code = 1
LOG.error("Use of %s module without become in "
"task %s in %s",
module, task['name'], fullpath)
for module in cmd_modules:
docker_without_become = False
ce_without_become = False
if (module in task and not task.get('become')):
if (isinstance(task[module], str) and
(task[module]).startswith('docker')):
docker_without_become = True
((task[module]).startswith('docker') or
(task[module]).startswith('podman'))):
ce_without_become = True
if (isinstance(task[module], dict) and
task[module]['cmd'].startswith('docker')):
docker_without_become = True
if docker_without_become:
(task[module]['cmd'].startswith('docker') or
task[module]['cmd'].startswith('podman'))):
ce_without_become = True
if ce_without_become:
return_code = 1
LOG.error("Use of docker in %s module without "
LOG.error("Use of container engine in %s "
"module without "
"become in task %s in %s",
module, task['name'], fullpath)

View File

@ -1,13 +1,14 @@
#!/usr/bin/env bash
#
# This script can be used to check user privilege to execute
# docker commands
# docker or podman commands depending on CONTAINER_ENGINE
# environment variable
function check_dockerexecute {
sudo docker ps &>/dev/null
sudo $CONTAINER_ENGINE ps &>/dev/null
return_val=$?
if [ $return_val -ne 0 ]; then
echo "User $USER can't seem to run Docker commands. Verify product documentation to allow user to execute docker commands" 1>&2
echo "User $USER can't seem to run ${CONTAINER_ENGINE^} commands. Verify product documentation to allow user to execute ${CONTAINER_ENGINE^} commands" 1>&2
exit 1
fi
}

View File

@ -28,6 +28,7 @@
- ^zuul\.d/
vars:
previous_release: zed
container_engine: "docker"
scenario: core
virt_type: qemu
is_upgrade: no