Use toci scripts as templates for zuulv3
Change-Id: Ibd131a3da0906fb474178a1359de5f0788c59f54
This commit is contained in:
parent
37c2892ab5
commit
90d80f6570
|
@ -30,6 +30,33 @@
|
|||
- centos-7
|
||||
name: tripleo CI jobs runner
|
||||
tasks:
|
||||
|
||||
- name: render toci_gate_test script
|
||||
template:
|
||||
src: templates/toci_gate_test.sh.j2
|
||||
dest: "{{ ansible_user_dir }}/workspace/tripleo-ci/toci_gate_test.sh"
|
||||
mode: 0755
|
||||
force: yes
|
||||
|
||||
- name: render toci_quickstart script
|
||||
template:
|
||||
src: templates/toci_quickstart.sh.j2
|
||||
dest: "{{ ansible_user_dir }}/workspace/tripleo-ci/toci_quickstart.sh"
|
||||
mode: 0755
|
||||
force: yes
|
||||
|
||||
- name: render common_vars script
|
||||
template:
|
||||
src: templates/common_vars.bash.j2
|
||||
dest: "{{ ansible_user_dir }}/workspace/tripleo-ci/scripts/common_vars.bash"
|
||||
force: yes
|
||||
|
||||
- name: render oooq_common_functions script
|
||||
template:
|
||||
src: templates/oooq_common_functions.sh.j2
|
||||
dest: "{{ ansible_user_dir }}/workspace/tripleo-ci/scripts/oooq_common_functions.sh"
|
||||
force: yes
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
|
|
|
@ -0,0 +1,52 @@
|
|||
# common_vars.bash -- environment variables shared by the toci_* CI scripts.
# Sourced (not executed): it only exports/derives variables, no side effects
# beyond the environment.

# Periodic stable jobs set OVERRIDE_ZUUL_BRANCH, gate stable jobs
# just have the branch they're proposed to, e.g ZUUL_BRANCH, in both
# cases we need to set STABLE_RELEASE to match for tripleo.sh
export ZUUL_BRANCH=${ZUUL_BRANCH:-""}

# puppet-ceph has stable/jewel branch where we want to test the latest
# version of TripleO where puppet-ceph is actually used.
# Since we switched to ceph-ansible during Pike, we're testing
# TripleO with Ocata version which is probably fine.
if [ "$ZUUL_BRANCH" = "stable/jewel" ]; then
    export STABLE_RELEASE="ocata"
fi

export OVERRIDE_ZUUL_BRANCH=${OVERRIDE_ZUUL_BRANCH:-""}
export STABLE_RELEASE=${STABLE_RELEASE:-""}
export FEATURE_BRANCH=${FEATURE_BRANCH:-""}
# In upgrade jobs STABLE_RELEASE is changed to point to the initial
# deployment branch but we need to keep the actual release for the review
# to be used in delorean-build phase.
export REVIEW_RELEASE=${REVIEW_RELEASE:-""}
# Derive STABLE_RELEASE/REVIEW_RELEASE from the branch names by stripping
# the "stable/" prefix; OVERRIDE_ZUUL_BRANCH wins over ZUUL_BRANCH for
# STABLE_RELEASE (it is checked last and overwrites).
if [[ -z $STABLE_RELEASE ]]; then
    if [[ $ZUUL_BRANCH =~ ^stable/ ]]; then
        export STABLE_RELEASE=${ZUUL_BRANCH#stable/}
        export REVIEW_RELEASE=${ZUUL_BRANCH#stable/}
    fi
    if [[ $OVERRIDE_ZUUL_BRANCH =~ ^stable/ ]]; then
        export STABLE_RELEASE=${OVERRIDE_ZUUL_BRANCH#stable/}
    fi
fi

# if we still don't have an stable branch, check if that
# is a feature branch
if [ -z "$STABLE_RELEASE" ] && [ "$ZUUL_BRANCH" != "master" ]; then
    export FEATURE_BRANCH=$ZUUL_BRANCH
fi

export TRIPLEO_ROOT=${TRIPLEO_ROOT:-"/opt/stack/new"}
export WORKSPACE=${WORKSPACE:-"$TRIPLEO_ROOT/workspace"}
export DEVSTACK_GATE_TIMEOUT=${DEVSTACK_GATE_TIMEOUT:-"180"}
export PATH=/sbin:/usr/sbin:$PATH

# NOTE(review): presumably the libvirt/OVB undercloud VM name -- confirm
# against the consumers of this variable.
export UNDERCLOUD_VM_NAME=instack

# Minutes of wall-clock budget left for the job (consumed by run_with_timeout).
export REMAINING_TIME=${REMAINING_TIME:-180}
export NODEPOOL_DOCKER_REGISTRY_PROXY=${NODEPOOL_DOCKER_REGISTRY_PROXY:-""}

# post ci chores to run at the end of ci
SSH_OPTIONS='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32'

# NOTE(pabelanger): this logic should be inverted to only include what developers need, not exclude things on the filesystem.
# NOTE(review): --exclude=etc/services appears twice below -- harmless, but
# one occurrence could be dropped.
TARCMD="sudo XZ_OPT=-3 tar -cJf - --exclude=var/log/journal --exclude=udev/hwdb.bin --exclude=etc/puppet/modules --exclude=etc/project-config --exclude=etc/services --exclude=selinux/targeted --exclude=etc/services --exclude=etc/pki /var/log /etc"
JLOGCMD="sudo journalctl --output short-precise | gzip -c | sudo dd of=/var/log/journal-text.txt.gz"
|
|
@ -0,0 +1,209 @@
|
|||
# Resolve the release that precedes the given one for a given upgrade type.
# $1 - release name (defaults to "master")
# $2 - upgrade type: "mixed_upgrade" (default) or "ffu_upgrade"
# Prints the previous release on stdout; for an unrecognized type prints
# "UNKNOWN_TYPE" and returns 1.
function previous_release_from {
    local current_release="${1:-master}"
    local upgrade_type="${2:-mixed_upgrade}"
    local resolved=""

    if [ "${upgrade_type}" = "mixed_upgrade" ]; then
        resolved=$(previous_release_mixed_upgrade_case "${current_release}")
    elif [ "${upgrade_type}" = "ffu_upgrade" ]; then
        resolved=$(previous_release_ffu_upgrade_case "${current_release}")
    else
        echo "UNKNOWN_TYPE"
        return 1
    fi
    echo "${resolved}"
}
|
||||
|
||||
# Map a release name to its immediate predecessor for mixed-version upgrades.
# $1 - release name (defaults to "master"); empty string is treated as master.
# Prints the predecessor; for an unknown release prints "UNKNOWN_RELEASE"
# and returns 1.
function previous_release_mixed_upgrade_case {
    local rel="${1:-master}"
    case "${rel}" in
        # NOTE: we need to update this when we cut a stable branch
        ''|master) echo "queens" ;;
        queens)    echo "pike" ;;
        pike)      echo "ocata" ;;
        ocata)     echo "newton" ;;
        newton)    echo "mitaka" ;;
        *)
            echo "UNKNOWN_RELEASE"
            return 1
            ;;
    esac
}
|
||||
|
||||
# Map a release name to the FFU (fast-forward upgrade) starting release.
# $1 - release name (defaults to "master"); empty string is treated as master.
# Prints the starting release; releases without an FFU path print
# "INVALID_RELEASE_FOR_FFU" and return 1.
function previous_release_ffu_upgrade_case {
    local rel="${1:-master}"

    case "${rel}" in
        # NOTE: we need to update this when we cut a stable branch
        ''|master) echo "newton" ;;
        queens)    echo "newton" ;;
        *)
            echo "INVALID_RELEASE_FOR_FFU"
            return 1
            ;;
    esac
}
|
||||
|
||||
# Return success (0) iff the given boolean key is "True" in a featureset file.
# $1 - featureset key to query (e.g. "mixed_upgrade")
# $2 - path to the featureset YAML file
# Reads the key with shyaml, defaulting to "False" when the key is absent.
function is_featureset {
    local type="${1}"
    local featureset_file="${2}"

    # Quote the command substitution: if shyaml printed nothing (or anything
    # with whitespace), the unquoted form would make the [ ... ] test
    # syntactically invalid ("[: =: unary operator expected") instead of
    # cleanly evaluating to false.
    [ "$(shyaml get-value "${type}" "False" < "${featureset_file}")" = "True" ]
}
|
||||
|
||||
# Run a command under a wall-clock budget derived from the job start time.
# $1   - START_JOB_TIME: epoch seconds when the job began
# $2.. - command (and arguments) to execute
# Returns 143 immediately when less than one minute of budget remains;
# otherwise runs the command under /usr/bin/timeout.
function run_with_timeout {
    JOB_TIME=$1
    shift
    COMMAND=$@
    # Leave 20 minutes for quickstart logs collection for ovb only
    case "$TOCI_JOBTYPE" in
        *ovb*) RESERVED_LOG_TIME=20 ;;
        *)     RESERVED_LOG_TIME=3 ;;
    esac
    # Use $REMAINING_TIME of infra to calculate maximum time for remaining part of job
    REMAINING_TIME=${REMAINING_TIME:-180}
    TIME_FOR_COMMAND=$(( REMAINING_TIME - ($(date +%s) - JOB_TIME)/60 - RESERVED_LOG_TIME ))

    if [[ $TIME_FOR_COMMAND -lt 1 ]]; then
        return 143
    fi
    # Word splitting of ${COMMAND} is intentional: it carries the full
    # command line, arguments included.
    /usr/bin/timeout --preserve-status ${TIME_FOR_COMMAND}m ${COMMAND}
}
|
||||
|
||||
# Render a log-collection helper script into $LOGS_DIR/collect_logs.sh and,
# for OVB jobs running on the RH clouds, execute it immediately (then rename
# it so later multinode tooling does not re-run it).
# Relies on the caller having set: LOGS_DIR, WORKSPACE, OOOQ_DIR,
# QUICKSTART_COLLECTLOGS_CMD, TOCI_JOBTYPE, NODEPOOL_PROVIDER.
function collect_logs {
    # NOTE: the heredoc delimiter is unquoted, so every $var below is
    # expanded NOW, while rendering, not when collect_logs.sh later runs.
    cat <<-EOF > $LOGS_DIR/collect_logs.sh
#!/bin/bash
set -x

export NODEPOOL_PROVIDER=${NODEPOOL_PROVIDER:-''}
export STATS_TESTENV=${STATS_TESTENV:-''}
export STATS_OOOQ=${STATS_OOOQ:-''}
export START_JOB_TIME=${START_JOB_TIME:-''}
export ZUUL_PIPELINE=${ZUUL_PIPELINE:-''}
export DEVSTACK_GATE_TIMEOUT=${DEVSTACK_GATE_TIMEOUT:-''}
export REMAINING_TIME=${REMAINING_TIME:-''}
export LOCAL_WORKING_DIR="$WORKSPACE/.quickstart"
export OPT_WORKDIR=$LOCAL_WORKING_DIR
export WORKING_DIR="$HOME"
export LOGS_DIR=$WORKSPACE/logs
export VIRTUAL_ENV_DISABLE_PROMPT=1
export ANSIBLE_CONFIG=$OOOQ_DIR/ansible.cfg
export ARA_DATABASE=sqlite:///${LOCAL_WORKING_DIR}/ara.sqlite
export ZUUL_CHANGES=${ZUUL_CHANGES:-''}
export NODES_FILE=${NODES_FILE:-''}
export TOCI_JOBTYPE=$TOCI_JOBTYPE
export STABLE_RELEASE=${STABLE_RELEASE:-''}
export QUICKSTART_RELEASE=${QUICKSTART_RELEASE:-''}

set +u
source $LOCAL_WORKING_DIR/bin/activate
set -u
source $OOOQ_DIR/ansible_ssh_env.sh

/usr/bin/timeout --preserve-status 40m $QUICKSTART_COLLECTLOGS_CMD > $LOGS_DIR/quickstart_collect_logs.log || \
echo "WARNING: quickstart collect-logs failed, check quickstart_collectlogs.log for details"

cp $LOGS_DIR/undercloud/var/log/postci.txt.gz $LOGS_DIR/ || true

if [[ -e $LOGS_DIR/undercloud/home/$USER/tempest/testrepository.subunit.gz ]]; then
    cp $LOGS_DIR/undercloud/home/$USER/tempest/testrepository.subunit.gz ${LOGS_DIR}/testrepository.subunit.gz
elif [[ -e $LOGS_DIR/undercloud/home/$USER/pingtest.subunit.gz ]]; then
    cp $LOGS_DIR/undercloud/home/$USER/pingtest.subunit.gz ${LOGS_DIR}/testrepository.subunit.gz
elif [[ -e $LOGS_DIR/undercloud/home/$USER/undercloud_sanity.subunit.gz ]]; then
    cp $LOGS_DIR/undercloud/home/$USER/undercloud_sanity.subunit.gz ${LOGS_DIR}/testrepository.subunit.gz
fi

# Copy tempest.html to root dir
cp $LOGS_DIR/undercloud/home/$USER/tempest/tempest.html.gz ${LOGS_DIR} || true

# Copy tempest and .testrepository directory to /opt/stack/new/tempest and
# unzip
sudo mkdir -p /opt/stack/new
sudo cp -Rf $LOGS_DIR/undercloud/home/$USER/tempest /opt/stack/new || true
sudo gzip -d -r /opt/stack/new/tempest/.testrepository || true

# record the size of the logs directory
# -L, --dereference dereference all symbolic links
# Note: tail -n +1 is to prevent the error 'Broken Pipe' e.g. 'sort: write failed: standard output: Broken pipe'

du -L -ch $LOGS_DIR/* | tail -n +1 | sort -rh | head -n 200 &> $LOGS_DIR/log-size.txt || true
EOF

    # Execute right away only for OVB jobs on the RH clouds; other job types
    # leave the rendered script behind for later tooling.
    if [[ "${NODEPOOL_PROVIDER:-''}" == "rdo-cloud-tripleo" ]] || [[ "${NODEPOOL_PROVIDER:-''}" == "tripleo-test-cloud-rh1" ]]; then
        if [[ "$TOCI_JOBTYPE" =~ "ovb" ]]; then
            bash $LOGS_DIR/collect_logs.sh
            # rename script to not to run it in multinode jobs
            mv $LOGS_DIR/collect_logs.sh $LOGS_DIR/ovb_collect_logs.sh
        fi
    fi

}
|
||||
|
||||
# Build the ansible-playbook release arguments for a release/hash pair.
# $1 - release name (selects the tripleo-ci release config file)
# $2 - DLRN hash for that release
# Prints the "--extra-vars ... -e ..." argument string on stdout.
# Requires LOCAL_WORKING_DIR to be set by the caller.
get_extra_vars_from_release()
{
    local rel_name=$1
    local rel_hash=$2
    local rel_file
    rel_file=$LOCAL_WORKING_DIR/config/release/tripleo-ci/$rel_name.yml
    echo "--extra-vars @$rel_file -e dlrn_hash=$rel_hash -e get_build_command=$rel_hash"
}
|
||||
|
||||
# Enclose IPv6 addresses in brackets.
# This is needed for scp command where the first column of IPv6 address gets
# interpreted as the separator between address and path otherwise.
# $1 : IP address to sanitize
function sanitize_ip_address {
    ip=$1
    case $ip in
        *:*) echo "[$ip]" ;;  # contains a colon -> treat as IPv6
        *)   echo $ip ;;
    esac
}
|
||||
|
||||
# Append selected environment variables to deploy.env so subnode bootstrap
# sees the same configuration as this process. Only variables that are set
# and non-empty are written. Requires TRIPLEO_ROOT.
function echo_vars_to_deploy_env_oooq {
    CALLER=$(caller)
    {
        echo "# Written via echo_vars_to_deploy_env from $CALLER"
        for VAR in NODEPOOL_CENTOS_MIRROR http_proxy MY_IP no_proxy NODECOUNT SSH_OPTIONS STABLE_RELEASE TRIPLEO_ROOT TOCI_JOBTYPE JOB_NAME SUBNODES_SSH_KEY FEATURE_BRANCH BOOTSTRAP_SUBNODES_MINIMAL; do
            # ${!VAR:+x} expands to "x" only when VAR is set and non-empty.
            if [ -n "${!VAR:+x}" ]; then
                echo "export $VAR=\"${!VAR}\""
            fi
        done
    } >> $TRIPLEO_ROOT/tripleo-ci/deploy.env
    # TODO(gcerami) uncomment this code if 3nodes jobs are implemented before the bootstrap role
    # in quickstart. If the bootstrap role is implemented first, this function can be completely
    # removed
    #for role in $OVERCLOUD_ROLES; do
    #    eval hosts=\${${role}_hosts}
    #    echo "export ${role}_hosts=\"${hosts}\"" >> $TRIPLEO_ROOT/tripleo-ci/deploy.env
    #done
}
|
||||
|
||||
# Copy deploy.env to every subnode listed in /etc/nodepool/sub_nodes_private.
# For each subnode: create the target directory via ssh, scp deploy.env to
# the remote home directory, then move it into place with sudo (scp cannot
# write root-owned paths directly).
# Requires: SSH_OPTIONS, TRIPLEO_ROOT, sanitize_ip_address().
function subnodes_scp_deploy_env {
    for ip in $(cat /etc/nodepool/sub_nodes_private); do
        # Bracket IPv6 addresses so scp does not read the colons as the
        # host/path separator.
        sanitized_address=$(sanitize_ip_address $ip)
        ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
            sudo mkdir -p $TRIPLEO_ROOT/tripleo-ci
        scp $SSH_OPTIONS -i /etc/nodepool/id_rsa \
            $TRIPLEO_ROOT/tripleo-ci/deploy.env ${sanitized_address}:
        ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
            sudo cp deploy.env $TRIPLEO_ROOT/tripleo-ci/deploy.env
    done
}
|
||||
|
|
@ -0,0 +1,304 @@
|
|||
#!/usr/bin/env bash
# toci_gate_test.sh -- entry point for TripleO CI jobs.
# Parses the dash-separated keywords in TOCI_JOBTYPE to assemble a
# tripleo-quickstart configuration (featureset, environment, node layout),
# provisions a test environment where needed (OVB via testenv-client,
# multinode via subnode bootstrap) and finally hands off to
# toci_quickstart.sh.

source $(dirname $0)/scripts/common_vars.bash

set -eux
export START_JOB_TIME=$(date +%s)
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'

source $TRIPLEO_ROOT/tripleo-ci/scripts/oooq_common_functions.sh

if [ -f /etc/nodepool/provider ] ; then
    # this sets
    # NODEPOOL_PROVIDER (e.g tripleo-test-cloud-rh1)
    # NODEPOOL_CLOUD (e.g.tripleo-test-cloud-rh1)
    # NODEPOOL_REGION (e.g. regionOne)
    # NODEPOOL_AZ
    source /etc/nodepool/provider

    # source variables common across all the scripts.
    source /etc/ci/mirror_info.sh

    export RHCLOUD=''
    if [ ${NODEPOOL_PROVIDER:-''} == 'rdo-cloud-tripleo' ]; then
        RHCLOUD='rdocloud'
        source $(dirname $0)/scripts/$RHCLOUD.env

        # In order to save space remove the cached git repositories, at this point in
        # CI the ones we are interested in have been cloned to /opt/stack/new. We
        # can also remove some distro images cached on the images.
        sudo rm -rf /opt/git
    fi
fi

# default $NODEPOOL_PROVIDER if not already set as it's used later
export NODEPOOL_PROVIDER=${NODEPOOL_PROVIDER:-""}

# create logs dir (check if collect-logs doesn't already do this)
mkdir -p $WORKSPACE/logs

# Set job as failed until it's overwritten by pingtest/tempest real test subunit
cat $TRIPLEO_ROOT/tripleo-ci/scripts/fake_fail_subunit | gzip - > $WORKSPACE/logs/testrepository.subunit.gz

# NOTE(trown): In openstack-infra we have pip already, but this will ensure we
# have it available in other environments.
command -v pip || \
    (curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"; sudo python get-pip.py)

sudo yum -y install python-requests python-urllib3
sudo pip install shyaml

# Sets whether or not this job will upload images.
export PERIODIC=0
# Sets which repositories to use in the job
export QUICKSTART_RELEASE="${STABLE_RELEASE:-master}"
# Stores OVB undercloud instance id
export UCINSTANCEID="null"
# Define environment variables file
export ENV_VARS=""
# Define file with set of features to test
export FEATURESET_FILE=""
export FEATURESET_CONF=""
# Define file with nodes topology
export NODES_FILE=""
# Set the number of overcloud nodes
export NODECOUNT=0
# Sets the undercloud hostname
export UNDERCLOUD=""
# Select the tags to run
export TAGS=all
# Identify in which environment we're deploying
export ENVIRONMENT=""
# Set the overcloud hosts for multinode
export OVERCLOUD_HOSTS=
export CONTROLLER_HOSTS=
export SUBNODES_SSH_KEY=
TIMEOUT_SECS=$((DEVSTACK_GATE_TIMEOUT*60))
export EXTRA_VARS=${EXTRA_VARS:-""}
export VXLAN_VARS=${VXLAN_VARS:-""}
export NODES_ARGS=""
export EXTRANODE=""
export EMIT_RELEASES_EXTRA_ARGS=""
# Set playbook execution status
export PLAYBOOK_DRY_RUN=${PLAYBOOK_DRY_RUN:=0}
export COLLECT_CONF="$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/collect-logs.yml"
LOCAL_WORKING_DIR="$WORKSPACE/.quickstart"
LWD=$LOCAL_WORKING_DIR
# Jobs listed here go through the legacy quickstart.sh single-playbook path
# rather than the per-playbook ansible-playbook invocations.
QUICKSTART_SH_JOBS="ovb-3ctlr_1comp-featureset001 multinode-1ctlr-featureset010"

export RELEASES_FILE_OUTPUT=$WORKSPACE/logs/releases.sh
export RELEASES_SCRIPT=$TRIPLEO_ROOT/tripleo-ci/scripts/emit_releases_file/emit_releases_file.py
export RELEASES_SCRIPT_LOGFILE=$WORKSPACE/logs/emit_releases_file.log

# Assemble quickstart configuration based on job type keywords
for JOB_TYPE_PART in $(sed 's/-/ /g' <<< "${TOCI_JOBTYPE:-}") ; do
    case $JOB_TYPE_PART in
        featureset*)
            FEATURESET_FILE="$LWD/config/general_config/$JOB_TYPE_PART.yml"
            FEATURESET_CONF="$FEATURESET_CONF --extra-vars @$FEATURESET_FILE"
            MIXED_UPGRADE_TYPE=''
            # Order matters. ffu featureset has both mixed version and ffu_overcloud_upgrade.
            if is_featureset ffu_overcloud_upgrade "$TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$JOB_TYPE_PART.yml"; then
                MIXED_UPGRADE_TYPE='ffu_upgrade'
            elif is_featureset mixed_upgrade "$TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$JOB_TYPE_PART.yml"; then
                MIXED_UPGRADE_TYPE='mixed_upgrade'
            elif is_featureset overcloud_update "$TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$JOB_TYPE_PART.yml"; then
                TAGS="$TAGS,overcloud-update"
            elif is_featureset undercloud_upgrade "$TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$JOB_TYPE_PART.yml"; then
                # Undercloud-upgrade jobs install the previous release first,
                # then upgrade the undercloud to UPGRADE_RELEASE.
                TAGS="$TAGS,undercloud-upgrade"
                export UPGRADE_RELEASE=$QUICKSTART_RELEASE
                export QUICKSTART_RELEASE=$(previous_release_mixed_upgrade_case "${UPGRADE_RELEASE}")
            fi
            # Set UPGRADE_RELEASE if applicable
            if [ -n "${MIXED_UPGRADE_TYPE}" ]; then
                export UPGRADE_RELEASE=$(previous_release_from "${STABLE_RELEASE}" "${MIXED_UPGRADE_TYPE}")
                QUICKSTART_RELEASE="$QUICKSTART_RELEASE-undercloud-$UPGRADE_RELEASE-overcloud"
                # Run overcloud-upgrade tag only in upgrades jobs
                TAGS="$TAGS,overcloud-upgrade"
            fi
            ;;
        ovb)
            OVB=1
            ENVIRONMENT="ovb"
            # Ask the metadata service for this instance's uuid (python 2 print).
            UCINSTANCEID=$(http_proxy= curl http://169.254.169.254/openstack/2015-10-15/meta_data.json | python -c 'import json, sys; print json.load(sys.stdin)["uuid"]')
            if [[ " $QUICKSTART_SH_JOBS " =~ " $TOCI_JOBTYPE " ]]; then
                export PLAYBOOKS=${PLAYBOOKS:-"baremetal-full-deploy.yml"}
            else
                export PLAYBOOKS=${PLAYBOOKS:-"ovb-setup.yml baremetal-full-undercloud.yml baremetal-full-overcloud-prep.yml baremetal-full-overcloud.yml baremetal-full-overcloud-validate.yml"}
            fi
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/ovb.yml"
            if [[ -f "$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/ovb-$RHCLOUD.yml" ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/ovb-$RHCLOUD.yml"
            fi
            UNDERCLOUD="undercloud"
            ;;
        multinode)
            SUBNODES_SSH_KEY=/etc/nodepool/id_rsa
            ENVIRONMENT="osinfra"
            if [[ " $QUICKSTART_SH_JOBS " =~ " $TOCI_JOBTYPE " ]]; then
                export PLAYBOOKS=${PLAYBOOKS:-"multinode.yml"}
            else
                export PLAYBOOKS=${PLAYBOOKS:-"quickstart.yml multinode-undercloud.yml multinode-overcloud-prep.yml multinode-overcloud.yml multinode-overcloud-update.yml multinode-overcloud-upgrade.yml multinode-validate.yml"}
            fi
            FEATURESET_CONF=" --extra-vars @$LWD/config/general_config/featureset-multinode-common.yml $FEATURESET_CONF"
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode.yml"
            if [[ $NODEPOOL_PROVIDER == "rdo-cloud-tripleo" ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode-rdocloud.yml"
            fi
            UNDERCLOUD="127.0.0.2"
            TAGS="build,undercloud-setup,undercloud-scripts,undercloud-install,undercloud-post-install,tripleo-validations,overcloud-scripts,overcloud-prep-config,overcloud-prep-containers,overcloud-deploy,overcloud-post-deploy,overcloud-validate"
            # First subnode is the controller; all subnodes form the overcloud.
            CONTROLLER_HOSTS=$(sed -n 1,1p /etc/nodepool/sub_nodes_private)
            OVERCLOUD_HOSTS=$(cat /etc/nodepool/sub_nodes_private)
            ;;
        singlenode)
            ENVIRONMENT="osinfra"
            UNDERCLOUD="127.0.0.2"
            if [[ " $QUICKSTART_SH_JOBS " =~ " $TOCI_JOBTYPE " ]]; then
                export PLAYBOOKS=${PLAYBOOKS:-"multinode.yml"}
            else
                export PLAYBOOKS=${PLAYBOOKS:-"quickstart.yml multinode-undercloud.yml multinode-undercloud-upgrade.yml multinode-overcloud-prep.yml multinode-overcloud.yml multinode-overcloud-upgrade.yml multinode-validate.yml"}
            fi
            FEATURESET_CONF=" --extra-vars @$LWD/config/general_config/featureset-multinode-common.yml $FEATURESET_CONF"
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode.yml"
            if [[ $NODEPOOL_PROVIDER == "rdo-cloud-tripleo" ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode-rdocloud.yml"
            fi
            TAGS="build,undercloud-setup,undercloud-scripts,undercloud-install,undercloud-validate,images"
            ;;
        standalone)
            ENVIRONMENT="osinfra"
            UNDERCLOUD="127.0.0.2"
            export PLAYBOOKS=${PLAYBOOKS:-"quickstart.yml multinode-standalone.yml"}
            FEATURESET_CONF=" --extra-vars @$LWD/config/general_config/featureset-multinode-common.yml $FEATURESET_CONF"
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode.yml"
            if [[ $NODEPOOL_PROVIDER == "rdo-cloud-tripleo" ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode-rdocloud.yml"
            fi
            TAGS="build,standalone"
            ;;
        periodic)
            PERIODIC=1
            QUICKSTART_RELEASE="promotion-testing-hash-${QUICKSTART_RELEASE}"
            EMIT_RELEASES_EXTRA_ARGS="$EMIT_RELEASES_EXTRA_ARGS --is-periodic"
            ;;
        gate)
            ;;
        dryrun)
            PLAYBOOK_DRY_RUN=1
            ;;
        *)
            # the rest should be node configuration
            NODES_FILE="$TRIPLEO_ROOT/tripleo-quickstart/config/nodes/$JOB_TYPE_PART.yml"
            ;;
    esac
done

# Only the upgrade/update featuresets (037/050/010/011) need the emitted
# per-phase release/hash file.
if [[ -f "$RELEASES_SCRIPT" ]] && [[ $FEATURESET_FILE =~ '037' || $FEATURESET_FILE =~ '050' || $FEATURESET_FILE =~ '010' || $FEATURESET_FILE =~ '011' ]]; then

    python $RELEASES_SCRIPT \
        --stable-release ${STABLE_RELEASE:-"master"} \
        --featureset-file $TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$(basename $FEATURESET_FILE) \
        --output-file $RELEASES_FILE_OUTPUT \
        --log-file $RELEASES_SCRIPT_LOGFILE \
        $EMIT_RELEASES_EXTRA_ARGS
fi

if [[ ! -z $NODES_FILE ]]; then
    pushd $TRIPLEO_ROOT/tripleo-quickstart
    NODECOUNT=$(shyaml get-value node_count < $NODES_FILE)
    popd
    NODES_ARGS="--extra-vars @$NODES_FILE"
    # "supp" parts of the nodes filename encode extra (supplemental) nodes;
    # extract the numeric count from that part.
    for PART in $(sed 's/_/ /g' <<< "$NODES_FILE") ; do
        if [[ "$PART" == *"supp"* ]]; then
            EXTRANODE=" --extra-nodes ${PART//[!0-9]/} "
        fi;
    done
fi

# Import gated tripleo-upgrade in oooq for upgrades/updates jobs
if [[ -d $TRIPLEO_ROOT/tripleo-upgrade ]]; then
    echo "file://${TRIPLEO_ROOT}/tripleo-upgrade/#egg=tripleo-upgrade" >> ${TRIPLEO_ROOT}/tripleo-quickstart/quickstart-extras-requirements.txt
else
    # Otherwise, if not importing it, oooq will fail when loading
    # tripleo-upgrade role in the playbook.
    echo "git+https://git.openstack.org/openstack/tripleo-upgrade.git@${ZUUL_BRANCH}#egg=tripleo-upgrade" >> ${TRIPLEO_ROOT}/tripleo-quickstart/quickstart-extras-requirements.txt
fi

# Start time tracking
export STATS_TESTENV=$(date +%s)
pushd $TRIPLEO_ROOT/tripleo-ci
if [ -z "${TE_DATAFILE:-}" -a "$ENVIRONMENT" = "ovb" ] ; then

    export GEARDSERVER=${TEBROKERIP-192.168.1.1}
    # NOTE(pabelanger): We need gear for testenv, but this really should be
    # handled by tox.
    sudo pip install gear
    # Kill the whole job if it doesn't get a testenv in 20 minutes as it likely will timout in zuul
    ( sleep 1200 ; [ ! -e /tmp/toci.started ] && sudo kill -9 $$ ) &

    # We only support multi-nic at the moment
    NETISO_ENV="multi-nic"

    # provision env in rh cloud, then start quickstart
    ./testenv-client -b $GEARDSERVER:4730 -t $TIMEOUT_SECS \
        --envsize $NODECOUNT --ucinstance $UCINSTANCEID \
        --net-iso $NETISO_ENV $EXTRANODE -- ./toci_quickstart.sh
elif [ "$ENVIRONMENT" = "ovb" ] ; then
    # We only support multi-nic at the moment
    NETISO_ENV="multi-nic"
    ./toci_quickstart.sh
else

    # Copy nodepool keys to current user
    sudo cp /etc/nodepool/id_rsa* $HOME/.ssh/
    sudo chown $USER:$USER $HOME/.ssh/id_rsa*
    chmod 0600 $HOME/.ssh/id_rsa*
    cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
    # pre-ansible requirement
    sudo mkdir -p /root/.ssh/
    # everything below here *MUST* be translated to a role ASAP
    # empty image to fool overcloud deployment
    # set no_proxy variable
    export IP_DEVICE=${IP_DEVICE:-"eth0"}
    MY_IP=$(ip addr show dev $IP_DEVICE | awk '/inet / {gsub("/.*", "") ; print $2}')
    MY_IP_eth1=$(ip addr show dev eth1 | awk '/inet / {gsub("/.*", "") ; print $2}') || MY_IP_eth1=""

    export http_proxy=""
    undercloud_net_range="192.168.24."
    undercloud_services_ip=$undercloud_net_range"1"
    undercloud_haproxy_public_ip=$undercloud_net_range"2"
    undercloud_haproxy_admin_ip=$undercloud_net_range"3"
    export no_proxy=$undercloud_services_ip,$undercloud_haproxy_public_ip,$undercloud_haproxy_admin_ip,$MY_IP,$MY_IP_eth1

    # multinode bootstrap script
    export DO_BOOTSTRAP_SUBNODES=${DO_BOOTSTRAP_SUBNODES:-1}
    export BOOTSTRAP_SUBNODES_MINIMAL=1
    overcloud_release=${UPGRADE_RELEASE:-$STABLE_RELEASE}
    if [ "${overcloud_release}" = "newton" ]; then
        BOOTSTRAP_SUBNODES_MINIMAL=0
    fi

    echo_vars_to_deploy_env_oooq
    subnodes_scp_deploy_env
    if [ "$DO_BOOTSTRAP_SUBNODES" = "1" ]; then
        # Timestamp each output line and keep a copy in the bootstrap log;
        # on failure, show the tail of that log before failing the job.
        $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh \
            --bootstrap-subnodes \
            2>&1 | awk '{ print strftime("%Y-%m-%d %H:%M:%S |"), $0; fflush(); }' | sudo tee /var/log/bootstrap-subnodes.log \
            || (tail -n 50 /var/log/bootstrap-subnodes.log && false)
    fi

    # finally, run quickstart
    ./toci_quickstart.sh
fi

echo "Run completed"
# Ship the size of the logs directory to the stats collector (best effort).
echo "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.logs.size_mb" "$(du -sm $WORKSPACE/logs | awk {'print $1'})" "$(date +%s)" | nc 66.187.229.172 2003 || true
|
|
@ -0,0 +1,182 @@
|
|||
#!/usr/bin/env bash
# toci_quickstart.sh -- runs tripleo-quickstart for the configuration
# assembled by toci_gate_test.sh: bootstraps the quickstart virtualenv,
# prints and then executes the selected playbooks under a time budget,
# and finally collects logs. Expects WORKSPACE, TRIPLEO_ROOT, UNDERCLOUD,
# TAGS, PLAYBOOKS, ENVIRONMENT etc. to be exported by the caller.
set -eux
set -o pipefail
export ANSIBLE_NOCOLOR=1
# Convert STATS_TESTENV from a start timestamp into an elapsed duration.
[[ -n ${STATS_TESTENV:-''} ]] && export STATS_TESTENV=$(( $(date +%s) - STATS_TESTENV ))
export STATS_OOOQ=$(date +%s)

LOCAL_WORKING_DIR="$WORKSPACE/.quickstart"
WORKING_DIR="$HOME"
LOGS_DIR=$WORKSPACE/logs

source $TRIPLEO_ROOT/tripleo-ci/scripts/oooq_common_functions.sh

## Signal to toci_gate_test.sh we've started by
touch /tmp/toci.started

export DEFAULT_ARGS="--extra-vars local_working_dir=$LOCAL_WORKING_DIR \
    --extra-vars virthost=$UNDERCLOUD \
    --inventory $LOCAL_WORKING_DIR/hosts \
    --extra-vars tripleo_root=$TRIPLEO_ROOT \
    --extra-vars working_dir=$WORKING_DIR \
"

# --install-deps arguments installs deps and then quits, no other arguments are
# processed.
QUICKSTART_PREPARE_CMD="
    ./quickstart.sh
    --install-deps
"

QUICKSTART_VENV_CMD="
    ./quickstart.sh
    --bootstrap
    --no-clone
    --working-dir $LOCAL_WORKING_DIR
    --playbook noop.yml
    --retain-inventory
    $UNDERCLOUD
"

QUICKSTART_INSTALL_CMD="
    $LOCAL_WORKING_DIR/bin/ansible-playbook
    --tags $TAGS
    --skip-tags teardown-all
"

QUICKSTART_COLLECTLOGS_CMD="$LOCAL_WORKING_DIR/bin/ansible-playbook \
    $LOCAL_WORKING_DIR/playbooks/collect-logs.yml \
    -vv \
    --extra-vars @$LOCAL_WORKING_DIR/config/release/tripleo-ci/$QUICKSTART_RELEASE.yml \
    $FEATURESET_CONF \
    $ENV_VARS \
    $EXTRA_VARS \
    $DEFAULT_ARGS \
    --extra-vars @$COLLECT_CONF \
    --extra-vars artcl_collect_dir=$LOGS_DIR \
    --tags all \
    --skip-tags teardown-all \
"

export QUICKSTART_DEFAULT_RELEASE_ARG="--extra-vars @$LOCAL_WORKING_DIR/config/release/tripleo-ci/$QUICKSTART_RELEASE.yml"

declare -A RELEASE_ARGS=()

# When emit_releases_file.py produced per-phase release/hash variables,
# map each playbook to its phase-specific release arguments.
if [[ -f "$RELEASES_FILE_OUTPUT" ]]; then

    source $RELEASES_FILE_OUTPUT

    declare -A RELEASE_ARGS=(
        ["multinode-undercloud.yml"]=$(get_extra_vars_from_release \
            $UNDERCLOUD_INSTALL_RELEASE $UNDERCLOUD_INSTALL_HASH)
        ["multinode-undercloud-upgrade.yml"]=$(get_extra_vars_from_release \
            $UNDERCLOUD_TARGET_RELEASE $UNDERCLOUD_TARGET_HASH)
        ["multinode-overcloud-prep.yml"]=$(get_extra_vars_from_release \
            $OVERCLOUD_DEPLOY_RELEASE $OVERCLOUD_DEPLOY_HASH)
        ["multinode-overcloud.yml"]=$(get_extra_vars_from_release \
            $OVERCLOUD_DEPLOY_RELEASE $OVERCLOUD_DEPLOY_HASH)
        ["multinode-overcloud-update.yml"]=$(get_extra_vars_from_release \
            $OVERCLOUD_DEPLOY_RELEASE $OVERCLOUD_DEPLOY_HASH)
        ["multinode-overcloud-upgrade.yml"]=$(get_extra_vars_from_release \
            $OVERCLOUD_TARGET_RELEASE $OVERCLOUD_TARGET_HASH)
        ["multinode-validate.yml"]=$(get_extra_vars_from_release \
            $OVERCLOUD_TARGET_RELEASE $OVERCLOUD_TARGET_HASH)
    )

fi

# Extra per-playbook arguments appended to the ansible-playbook call.
declare -A PLAYBOOKS_ARGS=(
    ["baremetal-full-overcloud.yml"]=" --extra-vars validation_args='--validation-errors-nonfatal' "
    ["multinode-overcloud.yml"]=" --extra-vars validation_args='--validation-errors-nonfatal' "
    ["multinode.yml"]=" --extra-vars validation_args='--validation-errors-nonfatal' "
)

mkdir -p $LOCAL_WORKING_DIR
# TODO(gcerami) parametrize hosts
cp $TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/${ENVIRONMENT}_hosts $LOCAL_WORKING_DIR/hosts
pushd $TRIPLEO_ROOT/tripleo-quickstart/

$QUICKSTART_PREPARE_CMD
$QUICKSTART_VENV_CMD

# Only ansible-playbook command will be used from this point forward, so we
# need some variables from quickstart.sh
OOOQ_DIR=$TRIPLEO_ROOT/tripleo-quickstart/
export OPT_WORKDIR=$LOCAL_WORKING_DIR
export ANSIBLE_CONFIG=$OOOQ_DIR/ansible.cfg
export ARA_DATABASE="sqlite:///${LOCAL_WORKING_DIR}/ara.sqlite"
export VIRTUAL_ENV_DISABLE_PROMPT=1
# Workaround for virtualenv issue https://github.com/pypa/virtualenv/issues/1029
set +u
source $LOCAL_WORKING_DIR/bin/activate
set -u
source $OOOQ_DIR/ansible_ssh_env.sh
# Convert STATS_OOOQ from a start timestamp into an elapsed duration.
[[ -n ${STATS_OOOQ:-''} ]] && export STATS_OOOQ=$(( $(date +%s) - STATS_OOOQ ))

# Debug step capture env variables
if [[ "$PLAYBOOK_DRY_RUN" == "1" ]]; then
    echo "-- Capture Environment Variables Used ---------"
    echo "$(env)" | tee -a $LOGS_DIR/toci_env_args_output.log
    declare -p | tee -a $LOGS_DIR/toci_env_args_output.log
fi

# Log the exact command line for every playbook before running anything.
echo "-- Playbooks Output --------------------------"
for playbook in $PLAYBOOKS; do
    echo "$QUICKSTART_INSTALL_CMD \
        ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
        $NODES_ARGS \
        $FEATURESET_CONF \
        $ENV_VARS \
        $EXTRA_VARS \
        $VXLAN_VARS \
        $DEFAULT_ARGS \
        $LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-}" \
        | sed 's/--/\n--/g' \
        | tee -a $LOGS_DIR/playbook_executions.log
    echo "# --------------------------------------- " \
        | tee -a $LOGS_DIR/playbook_executions.log
done

if [[ "$PLAYBOOK_DRY_RUN" == "1" ]]; then
    exit_value=0
else
    # Run each playbook in order under the remaining-time budget; stop at the
    # first failure but still collect logs afterwards.
    for playbook in $PLAYBOOKS; do
        echo "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}"
        run_with_timeout $START_JOB_TIME $QUICKSTART_INSTALL_CMD \
            "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
            $NODES_ARGS \
            $FEATURESET_CONF \
            $ENV_VARS \
            $EXTRA_VARS \
            $VXLAN_VARS \
            $DEFAULT_ARGS \
            --extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
            $LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
            2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?

        # Print status of playbook run
        [[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"
        [[ "$exit_value" != 0 ]] && echo "Playbook run of $playbook failed" && break
    done

    [[ "$exit_value" == 0 ]] && echo "Playbook run passed successfully" || echo "Playbook run failed"

    ## LOGS COLLECTION
    collect_logs

fi

popd

# Save the local DNS resolver cache alongside the job logs.
sudo unbound-control dump_cache > /tmp/dns_cache.txt
sudo chown ${USER}: /tmp/dns_cache.txt
cat /tmp/dns_cache.txt | gzip - > $LOGS_DIR/dns_cache.txt.gz

# Periodic (promotion) jobs record the job result next to the hash info.
if [[ "$PERIODIC" == 1 && -e $WORKSPACE/hash_info.sh ]] ; then
    echo export JOB_EXIT_VALUE=$exit_value >> $WORKSPACE/hash_info.sh
fi

# Archive the top-level quickstart working files (except the ara sqlite db).
mkdir -p $LOGS_DIR/quickstart_files
find $LOCAL_WORKING_DIR -maxdepth 1 -type f -not -name "*sqlite" | while read i; do gzip -cf $i > $LOGS_DIR/quickstart_files/$(basename $i).txt.gz; done
echo 'Quickstart completed.'
exit $exit_value
|
Loading…
Reference in New Issue