Remove stale bash scripts from tripleo-ci repo
As discussed in the parent patch [1] we should remove these stale files as they are no longer used. The oooq_common_functions, toci_gate_test, toci_quickstart have all been replaced by the templated versions in [2]. The heat-deploy-times.py is replaced by the script at [3] in the ansible-role-collect-logs repo. The generate-kill-heat.sh has not been touched in over 2 years and, as far as I can see, isn't being used anywhere (code search). Some of these, especially the common_functions/toci_ files, are updated out of confusion, causing unnecessary maintenance burden. [1] https://review.opendev.org/c/openstack/tripleo-ci/+/782549/5#message-f29d32c0c652d0929f680030e1a4c970fb111c2d [2] https://opendev.org/openstack/tripleo-ci/src/branch/master/roles/run-test/templates [3] https://opendev.org/openstack/ansible-role-collect-logs/src/branch/master/roles/collect_logs/files/heat-deploy-times.py Change-Id: If248d145cdcaa448de601b24a80a11d567fb235f
This commit is contained in:
parent
a92f3c4065
commit
963ac72a0a
|
@ -1,63 +0,0 @@
|
||||||
#!/bin/bash
# Common environment for TripleO CI jobs: derives STABLE_RELEASE /
# FEATURE_BRANCH from the Zuul branch variables and exports the shared
# paths, timeouts and log-collection command strings used by toci_* scripts.

# Periodic stable jobs set OVERRIDE_ZUUL_BRANCH, gate stable jobs
# just have the branch they're proposed to, e.g ZUUL_BRANCH, in both
# cases we need to set STABLE_RELEASE to match for tripleo.sh
export ZUUL_BRANCH=${ZUUL_BRANCH:-""}
# Default ZUUL_PROJECT too: it is compared below and may be unset outside Zuul.
export ZUUL_PROJECT=${ZUUL_PROJECT:-""}

# puppet-ceph has stable/jewel branch where we want to test the latest
# version of TripleO where puppet-ceph is actually used.
# Since we switched to ceph-ansible during Pike, we're testing
# TripleO with Ocata version which is probably fine.
if [[ "$ZUUL_BRANCH" = "stable/jewel" ]]; then
    export STABLE_RELEASE="ocata"
fi

# For OVB repo "stable/1.0" is feature branch
# (replaced deprecated `[ ... -a ... ]` with `[[ ... && ... ]]`)
if [[ "$ZUUL_BRANCH" = "stable/1.0" && "$ZUUL_PROJECT" = "openstack/openstack-virtual-baremetal" ]]; then
    export ZUUL_BRANCH="master"
fi

# For puppet-pacemaker "stable/1.1.x" is a centos7 support branch
if [[ "$ZUUL_BRANCH" = "stable/1.1.x" && "$ZUUL_PROJECT" = "openstack/puppet-pacemaker" ]]; then
    export ZUUL_BRANCH="stable/train"
    export STABLE_RELEASE="train"
fi

export OVERRIDE_ZUUL_BRANCH=${OVERRIDE_ZUUL_BRANCH:-""}
export STABLE_RELEASE=${STABLE_RELEASE:-""}
export FEATURE_BRANCH=${FEATURE_BRANCH:-""}
# In upgrade jobs STABLE_RELEASE is changed to point to the initial
# deployment branch but we need to keep the actual release for the review
# to be used in delorean-build phase.
export REVIEW_RELEASE=${REVIEW_RELEASE:-""}

if [[ -z $STABLE_RELEASE ]]; then
    if [[ $ZUUL_BRANCH =~ ^stable/ ]]; then
        export STABLE_RELEASE=${ZUUL_BRANCH#stable/}
        export REVIEW_RELEASE=${ZUUL_BRANCH#stable/}
    fi
    # OVERRIDE_ZUUL_BRANCH (periodic jobs) takes precedence over ZUUL_BRANCH.
    if [[ $OVERRIDE_ZUUL_BRANCH =~ ^stable/ ]]; then
        export STABLE_RELEASE=${OVERRIDE_ZUUL_BRANCH#stable/}
    fi
fi

# if we still don't have a stable branch, check if that
# is a feature branch
if [[ -z "$STABLE_RELEASE" && "$ZUUL_BRANCH" != "master" ]]; then
    export FEATURE_BRANCH=$ZUUL_BRANCH
fi

export TRIPLEO_ROOT=${TRIPLEO_ROOT:-"/opt/stack/new"}
export WORKSPACE=${WORKSPACE:-"$TRIPLEO_ROOT/workspace"}
export DEVSTACK_GATE_TIMEOUT=${DEVSTACK_GATE_TIMEOUT:-"180"}
export PATH=/sbin:/usr/sbin:$PATH

export UNDERCLOUD_VM_NAME=instack

export REMAINING_TIME=${REMAINING_TIME:-180}
export NODEPOOL_DOCKER_REGISTRY_V2_PROXY=${NODEPOOL_DOCKER_REGISTRY_V2_PROXY:-""}

# post ci chores to run at the end of ci
SSH_OPTIONS='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32'

# NOTE(pabelanger): this logic should be inverted to only include what developers need, not exclude things on the filesystem.
# BUGFIX: the original listed --exclude=etc/services twice; keep it once.
TARCMD="sudo XZ_OPT=-3 tar -cJf - --exclude=var/log/journal --exclude=udev/hwdb.bin --exclude=etc/puppet/modules --exclude=etc/project-config --exclude=etc/services --exclude=selinux/targeted --exclude=etc/pki /var/log /etc"
JLOGCMD="sudo journalctl --output short-precise | sudo dd of=/var/log/journal-text.txt"
|
|
|
@ -1,26 +0,0 @@
|
||||||
#!/bin/bash
# Generate a "kill-heat" helper script: for the controller and each
# NovaCompute resource, collect the os-collect-config cfn arguments from
# heat resource metadata and emit one backgrounded os-collect-config
# invocation per resource so the heat completion signals can be collected.
set -eu
set -o pipefail

TMPFILE=$(mktemp)
TMP2FILE=$(mktemp)
# Remove the scratch files on any exit path (original leaked them).
trap 'rm -f -- "$TMPFILE" "$TMP2FILE"' EXIT

function heat_resource_metadata {
    # Build os-collect-config command line arguments for the given heat
    # resource, which when run, allow us to collect the heat completion
    # signals.
    heat resource-metadata overcloud "$1" | jq '.["os-collect-config"]["cfn"]' | grep \" | tr -d '\n' | sed -e 's/"//g' -e 's/_/-/g' -e 's/: / /g' -e 's/, / --cfn-/g' -e 's/^ /--cfn-/' -e 's/$/ --print/'
    echo
}

>"$TMPFILE"
heat_resource_metadata controller0 >>"$TMPFILE"
for i in $(seq 0 34) ; do
    heat_resource_metadata "NovaCompute$i" >>"$TMPFILE"
done

# Turn each collected argument line into a backgrounded command.
sed -e 's/^/os-collect-config /' -e 's/$/ \&/' < "$TMPFILE" > "$TMP2FILE"
# BUGFIX: plain `echo` does not interpret \n in bash, so the original wrote
# a literal "#!/bin/sh\nset -e\n" header; printf emits real newlines.
printf '#!/bin/sh\nset -e\n\n' > "$TMPFILE"
cat "$TMPFILE" "$TMP2FILE" > "kill-heat"
chmod +x "kill-heat"
|
|
|
@ -1,59 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# Copyright 2016 Red Hat Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
# Usage: openstack stack event list -f json overcloud | \
|
|
||||||
# heat-deploy-times.py [list of resource names]
|
|
||||||
# If no resource names are provided, all of the resources will be output.
|
|
||||||
|
|
||||||
import json
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
|
|
||||||
|
|
||||||
def process_events(all_events, events):
    """Print per-resource creation times, longest first.

    all_events: list of heat event dicts with 'resource_name',
        'resource_status' and 'event_time' keys.
    events: collection of resource names to report on.

    For each tracked resource, prints the elapsed seconds between its
    CREATE_IN_PROGRESS and CREATE_COMPLETE/CREATE_FAILED events; resources
    that never finished print 'Still in progress'.
    """
    times = {}
    for event in all_events:
        name = event['resource_name']
        status = event['resource_status']
        # Older clients return timestamps in the first format, newer ones
        # append a Z. This way we can handle both formats.
        try:
            strptime = time.strptime(event['event_time'], '%Y-%m-%dT%H:%M:%S')
        except ValueError:
            strptime = time.strptime(event['event_time'], '%Y-%m-%dT%H:%M:%SZ')
        etime = time.mktime(strptime)
        if name in events:
            if status == 'CREATE_IN_PROGRESS':
                times[name] = {'start': etime, 'elapsed': None}
            elif status in ('CREATE_COMPLETE', 'CREATE_FAILED'):
                # BUGFIX: guard against a completion event arriving without
                # a matching start event (previously raised KeyError).
                if name in times:
                    times[name]['elapsed'] = etime - times[name]['start']

    def sort_key(item):
        # BUGFIX: comparing None with floats raises TypeError on Python 3.
        # Map unfinished resources to -inf so, with reverse=True, they sort
        # last — matching the old Python 2 ordering of None.
        elapsed = item[1]['elapsed']
        return float('-inf') if elapsed is None else elapsed

    for name, data in sorted(times.items(), key=sort_key, reverse=True):
        elapsed = 'Still in progress'
        if data['elapsed'] is not None:
            elapsed = data['elapsed']
        print('%s %s' % (name, elapsed))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Parse the JSON event list piped in from `openstack stack event list`.
    parsed_events = json.loads(sys.stdin.read())
    # Resource names may be given as CLI arguments; default to every
    # resource seen in the event stream.
    selected = sys.argv[1:]
    if not selected:
        selected = {evt['resource_name'] for evt in parsed_events}
    process_events(parsed_events, selected)
|
|
|
@ -1,180 +0,0 @@
|
||||||
function previous_release_from {
    # Resolve the release preceding ${1:-master} for the given upgrade
    # type (${2:-mixed_upgrade}: 'mixed_upgrade' or 'ffu_upgrade').
    # Echoes the release name, or a marker string with non-zero status
    # for an unknown type.
    local release="${1:-master}"
    local upgrade_type="${2:-mixed_upgrade}"
    local resolved=""
    case "${upgrade_type}" in
        'mixed_upgrade')
            resolved=$(previous_release_mixed_upgrade_case "${release}");;
        'ffu_upgrade')
            resolved=$(previous_release_ffu_upgrade_case "${release}");;
        *)
            echo "UNKNOWN_TYPE"
            return 1
            ;;
    esac
    echo "${resolved}"
}
|
|
||||||
|
|
||||||
function previous_release_mixed_upgrade_case {
    # Echo the release immediately preceding ${1:-master} in the
    # mixed-upgrade path. Accepts plain names and their
    # promotion-testing-hash- aliases (where the original defined them).
    # Echoes UNKNOWN_RELEASE and returns 1 for unrecognized releases.
    local release="${1:-master}"
    case "${release}" in
        ''|master|promotion-testing-hash-master)   echo "wallaby";;
        wallaby|promotion-testing-hash-wallaby)    echo "victoria";;
        victoria|promotion-testing-hash-victoria)  echo "ussuri";;
        ussuri|promotion-testing-hash-ussuri)      echo "train";;
        train|promotion-testing-hash-train)        echo "stein";;
        stein|promotion-testing-hash-stein)        echo "rocky";;
        queens)                                    echo "pike";;
        pike)                                      echo "ocata";;
        ocata)                                     echo "newton";;
        *)
            echo "UNKNOWN_RELEASE"
            return 1
            ;;
    esac
}
|
|
||||||
|
|
||||||
function previous_release_ffu_upgrade_case {
    # Echo the fast-forward-upgrade source release for ${1:-master};
    # echoes INVALID_RELEASE_FOR_FFU and returns 1 when FFU does not
    # apply to the given release.
    local release="${1:-master}"
    case "${release}" in
        ''|master)
            # NOTE: we need to update this when we cut a stable branch
            echo "newton";;
        queens)
            echo "newton";;
        *)
            echo "INVALID_RELEASE_FOR_FFU"
            return 1;;
    esac
}
|
|
||||||
|
|
||||||
function is_featureset {
    # Succeed iff the boolean key ${1} is set to "True" in the featureset
    # YAML file ${2}. Missing keys fall back to "False" via shyaml's
    # default-value argument, so absent features read as disabled.
    local feature_key="${1}"
    local config_file="${2}"

    [[ "$(shyaml get-value "${feature_key}" "False" < "${config_file}")" = "True" ]]
}
|
|
||||||
|
|
||||||
function run_with_timeout {
    # Run a command under /usr/bin/timeout, budgeting whatever remains of
    # the job's wall-clock allowance.
    #   $1   - job start time (epoch seconds, e.g. $START_JOB_TIME)
    #   $2.. - command and its arguments
    # Reads TOCI_JOBTYPE and REMAINING_TIME (minutes, default 180).
    # Returns 143 without running the command when no time is left;
    # otherwise returns the command's own (preserved) exit status.
    local job_start=$1
    shift
    # Leave 20 minutes for quickstart logs collection for ovb only
    local reserved_log_time
    if [[ "$TOCI_JOBTYPE" =~ "ovb" ]]; then
        reserved_log_time=20
    else
        reserved_log_time=3
    fi
    # Use $REMAINING_TIME of infra to calculate maximum time for remaining part of job
    REMAINING_TIME=${REMAINING_TIME:-180}
    local minutes_left=$(( REMAINING_TIME - ($(date +%s) - job_start)/60 - reserved_log_time ))

    if (( minutes_left < 1 )); then
        return 143
    fi
    # BUGFIX: the original collapsed the command into an unquoted scalar
    # (COMMAND=$@), destroying argument boundaries; pass "$@" instead so
    # arguments containing spaces or glob characters survive intact.
    /usr/bin/timeout --preserve-status "${minutes_left}m" "$@"
}
|
|
||||||
|
|
||||||
function create_collect_logs_script {
    # Write $LOGS_DIR/collect_logs.sh: a standalone script that re-runs the
    # quickstart collect-logs playbook and normalizes the gathered
    # artifacts (subunit streams, tempest results, log sizes).
    # The heredoc delimiter is unquoted, so every $var below is expanded
    # NOW — the generated script snapshots the current job environment.
    cat <<-EOF > $LOGS_DIR/collect_logs.sh
#!/bin/bash
set -x

export NODEPOOL_PROVIDER=${NODEPOOL_PROVIDER:-''}
export STATS_TESTENV=${STATS_TESTENV:-''}
export STATS_OOOQ=${STATS_OOOQ:-''}
export START_JOB_TIME=${START_JOB_TIME:-''}
export ZUUL_PIPELINE=${ZUUL_PIPELINE:-''}
export DEVSTACK_GATE_TIMEOUT=${DEVSTACK_GATE_TIMEOUT:-''}
export REMAINING_TIME=${REMAINING_TIME:-''}
export LOCAL_WORKING_DIR="$WORKSPACE/.quickstart"
export OPT_WORKDIR=$LOCAL_WORKING_DIR
export WORKING_DIR="$HOME"
export LOGS_DIR=$WORKSPACE/logs
export VIRTUAL_ENV_DISABLE_PROMPT=1
export ANSIBLE_CONFIG=$OOOQ_DIR/ansible.cfg
export ARA_DATABASE=sqlite:///${LOCAL_WORKING_DIR}/ara.sqlite
export ZUUL_CHANGES=${ZUUL_CHANGES:-''}
export NODES_FILE=${NODES_FILE:-''}
export TOCI_JOBTYPE=$TOCI_JOBTYPE
export STABLE_RELEASE=${STABLE_RELEASE:-''}
export QUICKSTART_RELEASE=${QUICKSTART_RELEASE:-''}

set +u
source $LOCAL_WORKING_DIR/bin/activate
set -u
source $OOOQ_DIR/ansible_ssh_env.sh

# BUGFIX: the warning used to point at "quickstart_collectlogs.log", but the
# file actually written is quickstart_collect_logs.log.
/usr/bin/timeout --preserve-status 40m $QUICKSTART_COLLECTLOGS_CMD > $LOGS_DIR/quickstart_collect_logs.log || \
    echo "WARNING: quickstart collect-logs failed, check quickstart_collect_logs.log for details"

if [ -f $LOGS_DIR/undercloud/var/log/postci.txt.gz ]; then
    cp $LOGS_DIR/undercloud/var/log/postci.txt.gz $LOGS_DIR/
fi

if [[ -e $LOGS_DIR/undercloud/home/$USER/tempest/testrepository.subunit.gz ]]; then
    cp $LOGS_DIR/undercloud/home/$USER/tempest/testrepository.subunit.gz ${LOGS_DIR}/testrepository.subunit.gz
elif [[ -e $LOGS_DIR/undercloud/home/$USER/pingtest.subunit.gz ]]; then
    cp $LOGS_DIR/undercloud/home/$USER/pingtest.subunit.gz ${LOGS_DIR}/testrepository.subunit.gz
elif [[ -e $LOGS_DIR/undercloud/home/$USER/undercloud_sanity.subunit.gz ]]; then
    cp $LOGS_DIR/undercloud/home/$USER/undercloud_sanity.subunit.gz ${LOGS_DIR}/testrepository.subunit.gz
fi

# Copy tempest.html to root dir
if [ -f $LOGS_DIR/undercloud/home/$USER/tempest/tempest.html.gz ]; then
    cp $LOGS_DIR/undercloud/home/$USER/tempest/tempest.html.gz ${LOGS_DIR}
fi

# Copy tempest and .testrepository directory to /opt/stack/new/tempest and
# unzip
sudo -s -- <<SUDO
mkdir -p /opt/stack/new
if [ -d $LOGS_DIR/undercloud/home/$USER/tempest ]; then
    cp -Rf $LOGS_DIR/undercloud/home/$USER/tempest /opt/stack/new
fi
if [ -d /opt/stack/new/tempest/.testrepository ]; then
    gzip -d -r /opt/stack/new/tempest/.testrepository
fi
SUDO

# record the size of the logs directory
# -L, --dereference dereference all symbolic links
# Note: tail -n +1 is to prevent the error 'Broken Pipe' e.g. 'sort: write failed: standard output: Broken pipe'

du -L -ch $LOGS_DIR/* | tail -n +1 | sort -rh | head -n 200 &> $LOGS_DIR/log-size.txt || true
EOF

}
|
|
||||||
|
|
||||||
get_extra_vars_from_release()
|
|
||||||
{
|
|
||||||
local release_name=$1
|
|
||||||
local release_hash=$2
|
|
||||||
local newest_release_hash=${3:-""}
|
|
||||||
local release_file=$LOCAL_WORKING_DIR/config/release/tripleo-ci/${DISTRIBUTION:-CentOS}-${DISTRIBUTION_MAJOR_VERSION:-7}/$release_name.yml
|
|
||||||
echo "--extra-vars @$release_file -e dlrn_hash=$release_hash -e get_build_command=$release_hash ${newest_release_hash:+-e dlrn_hash_newest=$newest_release_hash}"
|
|
||||||
}
|
|
|
@ -1,308 +0,0 @@
|
||||||
#!/usr/bin/env bash
# Top-level CI gate entry point: assembles a tripleo-quickstart
# configuration from the $TOCI_JOBTYPE keywords and hands off to
# toci_quickstart.sh.

source $(dirname $0)/scripts/common_vars.bash
source $(dirname $0)/scripts/common_functions.sh

set -eux
export START_JOB_TIME=$(date +%s)
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'

source $TRIPLEO_ROOT/tripleo-ci/scripts/oooq_common_functions.sh

if [ -f /etc/nodepool/provider ] ; then
    # this sets
    # NODEPOOL_PROVIDER (e.g tripleo-test-cloud-rh1)
    # NODEPOOL_CLOUD (e.g.tripleo-test-cloud-rh1)
    # NODEPOOL_REGION (e.g. regionOne)
    # NODEPOOL_AZ
    source /etc/nodepool/provider

    # source variables common across all the scripts.
    if [ -e /etc/ci/mirror_info.sh ]; then
        source /etc/ci/mirror_info.sh
    fi

    export RHCLOUD=''
    if [[ ${NODEPOOL_PROVIDER:-''} == 'rdo-cloud'* ]]; then
        RHCLOUD='rdocloud'
    # BUGFIX: this branch used single-bracket `[ ... == ... ]` with an
    # unquoted operand; use [[ ]] like the branch above.
    elif [[ ${NODEPOOL_PROVIDER:-''} == 'vexxhost-nodepool-tripleo' ]]; then
        RHCLOUD='vexxhost'
    fi

    # BUGFIX: the original `[ -n $RHCLOUD ]` degenerates to the
    # always-true `[ -n ]` when RHCLOUD is empty, so scripts/.env was
    # sourced for unknown clouds; quoting restores the intended check.
    if [ -n "$RHCLOUD" ]; then
        source $(dirname $0)/scripts/$RHCLOUD.env

        # In order to save space remove the cached git repositories, at this point in
        # CI the ones we are interested in have been cloned to /opt/stack/new. We
        # can also remove some distro images cached on the images.
        # rm -rf spawns a separate process for each file, lets use find -delete
        sudo find /opt/git -delete || true
    fi
fi

# default $NODEPOOL_PROVIDER if not already set as it's used later
export NODEPOOL_PROVIDER=${NODEPOOL_PROVIDER:-""}

# create logs dir (check if collect-logs doesn't already do this)
mkdir -p $WORKSPACE/logs

# Set job as failed until it's overwritten by pingtest/tempest real test subunit
gzip - < $TRIPLEO_ROOT/tripleo-ci/scripts/fake_fail_subunit > $WORKSPACE/logs/testrepository.subunit.gz

# NOTE(trown): In openstack-infra we have pip already, but this will ensure we
# have it available in other environments.
command -v pip || \
    (curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"; sudo python get-pip.py)

sudo yum -y install python-requests python-urllib3
sudo pip install shyaml

# Sets whether or not this job will upload images.
export PERIODIC=0
# Sets which repositories to use in the job
export QUICKSTART_RELEASE="${STABLE_RELEASE:-master}"
# Stores OVB undercloud instance id
export UCINSTANCEID="null"
# Define environment variables file
export ENV_VARS=""
# Define file with set of features to test
export FEATURESET_FILE=""
export FEATURESET_CONF=""
# Define file with nodes topology
export NODES_FILE=""
# Set the number of overcloud nodes
export NODECOUNT=0
# Sets the undercloud hostname
export UNDERCLOUD=""
# Select the tags to run
export TAGS=all
# Identify in which environment we're deploying
export ENVIRONMENT=""
# Set the overcloud hosts for multinode
export OVERCLOUD_HOSTS=
export CONTROLLER_HOSTS=
export SUBNODES_SSH_KEY=
TIMEOUT_SECS=$((DEVSTACK_GATE_TIMEOUT*60))
export EXTRA_VARS=${EXTRA_VARS:-""}
export VXLAN_VARS=${VXLAN_VARS:-""}
export NODES_ARGS=""
export EXTRANODE=""
export EMIT_RELEASES_EXTRA_ARGS=""
# Set playbook execution status
export PLAYBOOK_DRY_RUN=${PLAYBOOK_DRY_RUN:=0}
export COLLECT_CONF="$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/collect-logs.yml"
LOCAL_WORKING_DIR="$WORKSPACE/.quickstart"
LWD=$LOCAL_WORKING_DIR
QUICKSTART_SH_JOBS="ovb-3ctlr_1comp-featureset001 multinode-1ctlr-featureset010"

export RELEASES_FILE_OUTPUT=$WORKSPACE/logs/releases.sh
export RELEASES_SCRIPT=$TRIPLEO_ROOT/tripleo-ci/scripts/emit_releases_file/emit_releases_file.py
export RELEASES_SCRIPT_LOGFILE=$WORKSPACE/logs/emit_releases_file.log

# Assemble quickstart configuration based on job type keywords
for JOB_TYPE_PART in $(sed 's/-/ /g' <<< "${TOCI_JOBTYPE:-}") ; do
    case $JOB_TYPE_PART in
        featureset*)
            FEATURESET_FILE="$LWD/config/general_config/$JOB_TYPE_PART.yml"
            # featurset_file is not yet in its final destination so we
            # have to use current_featureset_file.
            CURRENT_FEATURESET_FILE="$TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$JOB_TYPE_PART.yml"
            FEATURESET_CONF="$FEATURESET_CONF --extra-vars @$FEATURESET_FILE"
            MIXED_UPGRADE_TYPE=''
            # Order matters. ffu featureset has both mixed version and ffu_overcloud_upgrade.
            if is_featureset ffu_overcloud_upgrade "${CURRENT_FEATURESET_FILE}"; then
                MIXED_UPGRADE_TYPE='ffu_upgrade'
            elif is_featureset mixed_upgrade "${CURRENT_FEATURESET_FILE}"; then
                MIXED_UPGRADE_TYPE='mixed_upgrade'
            elif is_featureset overcloud_update "${CURRENT_FEATURESET_FILE}"; then
                TAGS="$TAGS,overcloud-update"
            elif is_featureset undercloud_upgrade "${CURRENT_FEATURESET_FILE}"; then
                TAGS="$TAGS,undercloud-upgrade"
                export UPGRADE_RELEASE=$QUICKSTART_RELEASE
                export QUICKSTART_RELEASE=$(previous_release_mixed_upgrade_case "${UPGRADE_RELEASE}")
            fi
            # The case is iterating over TOCI_JOBTYPE which is
            # standalone-featureset. So featureset comes after and we
            # can override TAGS safely.
            if is_featureset standalone_upgrade "${CURRENT_FEATURESET_FILE}" ; then
                # We don't want "build" as it would wrongly build test
                # package under the N-1 version.
                TAGS="standalone,standalone-upgrade"
            fi
            # Set UPGRADE_RELEASE if applicable
            if [ -n "${MIXED_UPGRADE_TYPE}" ]; then
                export UPGRADE_RELEASE=$(previous_release_from "${STABLE_RELEASE}" "${MIXED_UPGRADE_TYPE}")
                QUICKSTART_RELEASE="$QUICKSTART_RELEASE-undercloud-$UPGRADE_RELEASE-overcloud"
                # Run overcloud-upgrade tag only in upgrades jobs
                TAGS="$TAGS,overcloud-upgrade"
            fi
            ;;
        ovb)
            OVB=1
            ENVIRONMENT="ovb"
            METADATA_FILENAME='/mnt/config/openstack/latest/meta_data.json'
            if sudo test -f $METADATA_FILENAME; then
                METADATA=$(sudo cat /mnt/config/openstack/latest/meta_data.json)
                set +x
                # BUGFIX: use the print() form so the snippet runs under
                # both python 2 and python 3 (was a py2-only statement).
                UCINSTANCEID=$(echo $METADATA | python -c 'import json, sys; print(json.load(sys.stdin)["uuid"])')
                set -x
            else
                UCINSTANCEID=$(http_proxy= curl http://169.254.169.254/openstack/2015-10-15/meta_data.json | python -c 'import json, sys; print(json.load(sys.stdin)["uuid"])')
            fi
            if [[ " $QUICKSTART_SH_JOBS " =~ " $TOCI_JOBTYPE " ]]; then
                export PLAYBOOKS=${PLAYBOOKS:-"baremetal-full-deploy.yml"}
            else
                export PLAYBOOKS=${PLAYBOOKS:-"ovb-setup.yml baremetal-full-undercloud.yml baremetal-full-overcloud-prep.yml baremetal-full-overcloud.yml baremetal-full-overcloud-validate.yml browbeat-minimal.yml"}
            fi
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/ovb.yml"
            if [[ -f "$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/ovb-$RHCLOUD.yml" ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/ovb-$RHCLOUD.yml"
            fi
            UNDERCLOUD="undercloud"
            ;;
        multinode)
            SUBNODES_SSH_KEY=/etc/nodepool/id_rsa
            ENVIRONMENT="osinfra"
            if [[ " $QUICKSTART_SH_JOBS " =~ " $TOCI_JOBTYPE " ]]; then
                export PLAYBOOKS=${PLAYBOOKS:-"multinode.yml"}
            else
                export PLAYBOOKS=${PLAYBOOKS:-"quickstart.yml multinode-undercloud.yml multinode-overcloud-prep.yml multinode-overcloud.yml multinode-overcloud-update.yml multinode-overcloud-upgrade.yml multinode-validate.yml"}
            fi
            FEATURESET_CONF=" --extra-vars @$LWD/config/general_config/featureset-multinode-common.yml $FEATURESET_CONF"
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode.yml"
            if [[ $NODEPOOL_PROVIDER == "rdo-cloud"* ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode-rdocloud.yml"
            fi
            UNDERCLOUD="127.0.0.2"
            TAGS="build,undercloud-setup,undercloud-scripts,undercloud-install,undercloud-post-install,tripleo-validations,overcloud-scripts,overcloud-prep-config,overcloud-prep-containers,overcloud-deploy,overcloud-post-deploy,overcloud-validate"
            CONTROLLER_HOSTS=$(sed -n 1,1p /etc/nodepool/sub_nodes_private)
            OVERCLOUD_HOSTS=$(cat /etc/nodepool/sub_nodes_private)
            ;;
        singlenode)
            ENVIRONMENT="osinfra"
            UNDERCLOUD="127.0.0.2"
            if [[ " $QUICKSTART_SH_JOBS " =~ " $TOCI_JOBTYPE " ]]; then
                export PLAYBOOKS=${PLAYBOOKS:-"multinode.yml"}
            else
                export PLAYBOOKS=${PLAYBOOKS:-"quickstart.yml multinode-undercloud.yml multinode-undercloud-upgrade.yml multinode-overcloud-prep.yml multinode-overcloud.yml multinode-overcloud-upgrade.yml multinode-validate.yml"}
            fi
            FEATURESET_CONF=" --extra-vars @$LWD/config/general_config/featureset-multinode-common.yml $FEATURESET_CONF"
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode.yml"
            if [[ $NODEPOOL_PROVIDER == "rdo-cloud"* ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode-rdocloud.yml"
            fi
            TAGS="build,undercloud-setup,undercloud-scripts,undercloud-install,undercloud-validate,images"
            ;;
        standalone)
            ENVIRONMENT="osinfra"
            UNDERCLOUD="127.0.0.2"
            # Adding upgrade playbook here to be consistant with the v3 definition.
            export PLAYBOOKS=${PLAYBOOKS:-"quickstart.yml multinode-standalone.yml multinode-standalone-upgrade.yml "}
            FEATURESET_CONF=" --extra-vars @$LWD/config/general_config/featureset-multinode-common.yml $FEATURESET_CONF"
            ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode.yml"
            if [[ $NODEPOOL_PROVIDER == "rdo-cloud"* ]]; then
                ENV_VARS="$ENV_VARS --extra-vars @$TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/multinode-rdocloud.yml"
            fi
            TAGS="build,standalone"
            ;;
        periodic)
            PERIODIC=1
            QUICKSTART_RELEASE="promotion-testing-hash-${QUICKSTART_RELEASE}"
            EMIT_RELEASES_EXTRA_ARGS="$EMIT_RELEASES_EXTRA_ARGS --is-periodic"
            ;;
        gate)
            ;;
        dryrun)
            PLAYBOOK_DRY_RUN=1
            ;;
        *)
            # the rest should be node configuration
            NODES_FILE="$TRIPLEO_ROOT/tripleo-quickstart/config/nodes/$JOB_TYPE_PART.yml"
            ;;
    esac
done

if [[ -f "$RELEASES_SCRIPT" ]] && [[ $FEATURESET_FILE =~ 010|011|037|047|050|056 ]]; then
    python $RELEASES_SCRIPT \
        --stable-release ${STABLE_RELEASE:-"master"} \
        --featureset-file $TRIPLEO_ROOT/tripleo-quickstart/config/general_config/$(basename $FEATURESET_FILE) \
        --output-file $RELEASES_FILE_OUTPUT \
        --log-file $RELEASES_SCRIPT_LOGFILE \
        $EMIT_RELEASES_EXTRA_ARGS
fi

if [[ ! -z $NODES_FILE ]]; then
    pushd $TRIPLEO_ROOT/tripleo-quickstart
    NODECOUNT=$(shyaml get-value node_count < $NODES_FILE)
    popd
    NODES_ARGS="--extra-vars @$NODES_FILE"
    for PART in $(sed 's/_/ /g' <<< "$NODES_FILE") ; do
        if [[ "$PART" == *"supp"* ]]; then
            EXTRANODE=" --extra-nodes ${PART//[!0-9]/} "
        fi;
    done
fi

# Import gated tripleo-upgrade in oooq for upgrades/updates jobs
if [[ -d $TRIPLEO_ROOT/tripleo-upgrade ]]; then
    echo "file://${TRIPLEO_ROOT}/tripleo-upgrade/#egg=tripleo-upgrade" >> ${TRIPLEO_ROOT}/tripleo-quickstart/quickstart-extras-requirements.txt
else
    # Otherwise, if not importing it, oooq will fail when loading
    # tripleo-upgrade role in the playbook.
    echo "git+https://opendev.org/openstack/tripleo-upgrade.git@${ZUUL_BRANCH}#egg=tripleo-upgrade" >> ${TRIPLEO_ROOT}/tripleo-quickstart/quickstart-extras-requirements.txt
fi

# Import gated external repo in oooq
for EXTERNAL_REPO in 'browbeat' 'tripleo-ha-utils' 'tripleo-quickstart-extras'; do
    if [[ -d $TRIPLEO_ROOT/$EXTERNAL_REPO ]]; then
        sed -i "s#git+https://opendev.org/openstack/$EXTERNAL_REPO#file://${TRIPLEO_ROOT}/$EXTERNAL_REPO#1" ${TRIPLEO_ROOT}/tripleo-quickstart/quickstart-extras-requirements.txt
    fi
done

# Start time tracking
export STATS_TESTENV=$(date +%s)
pushd $TRIPLEO_ROOT/tripleo-ci
if [[ -e $WORKSPACE/instackenv.json && "$ENVIRONMENT" = "ovb" ]] ; then
    echo "Running without te-broker"
    export TE_DATAFILE=$WORKSPACE/instackenv.json
    ./toci_quickstart.sh
elif [ "$ENVIRONMENT" = "ovb" ] ; then
    # We only support multi-nic at the moment
    NETISO_ENV="multi-nic"
    ./toci_quickstart.sh
else
    # Copy nodepool keys to current user
    sudo cp /etc/nodepool/id_rsa* $HOME/.ssh/
    sudo chown $USER:$USER $HOME/.ssh/id_rsa*
    chmod 0600 $HOME/.ssh/id_rsa*
    cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
    # pre-ansible requirement
    sudo mkdir -p /root/.ssh/
    # everything below here *MUST* be translated to a role ASAP
    # empty image to fool overcloud deployment
    # set no_proxy variable
    export IP_DEVICE=${IP_DEVICE:-"eth0"}
    MY_IP=$(ip addr show dev $IP_DEVICE | awk '/inet / {gsub("/.*", "") ; print $2}')
    MY_IP_eth1=$(ip addr show dev eth1 | awk '/inet / {gsub("/.*", "") ; print $2}') || MY_IP_eth1=""

    export http_proxy=""
    undercloud_net_range="192.168.24."
    undercloud_services_ip=$undercloud_net_range"1"
    undercloud_haproxy_public_ip=$undercloud_net_range"2"
    undercloud_haproxy_admin_ip=$undercloud_net_range"3"
    export no_proxy=$undercloud_services_ip,$undercloud_haproxy_public_ip,$undercloud_haproxy_admin_ip,$MY_IP,$MY_IP_eth1

    # finally, run quickstart
    ./toci_quickstart.sh
fi

echo "Run completed"
echo "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.logs.size_mb" "$(du -sm $WORKSPACE/logs | awk {'print $1'})" "$(date +%s)" | nc 66.187.229.172 2003 || true
|
|
|
@ -1,197 +0,0 @@
|
||||||
#!/usr/bin/env bash
# toci_quickstart.sh — run tripleo-quickstart for the job configuration
# assembled by toci_gate_test.sh. Expects WORKSPACE, TRIPLEO_ROOT,
# UNDERCLOUD, TAGS, QUICKSTART_RELEASE, FEATURESET_CONF, ENV_VARS,
# EXTRA_VARS, COLLECT_CONF and RELEASES_FILE_OUTPUT to be exported by
# the caller (toci_gate_test.sh).
set -eux
set -o pipefail
export ANSIBLE_NOCOLOR=1
# Convert STATS_TESTENV from a start timestamp into an elapsed duration
# (seconds) when the caller recorded one.
[[ -n ${STATS_TESTENV:-''} ]] && export STATS_TESTENV=$(( $(date +%s) - STATS_TESTENV ))
export STATS_OOOQ=$(date +%s)

LOCAL_WORKING_DIR="$WORKSPACE/.quickstart"
WORKING_DIR="$HOME"
LOGS_DIR=$WORKSPACE/logs

source $TRIPLEO_ROOT/tripleo-ci/scripts/oooq_common_functions.sh

## Signal to toci_gate_test.sh we've started by
touch /tmp/toci.started

# Arguments shared by every ansible-playbook invocation below.
export DEFAULT_ARGS="--extra-vars local_working_dir=$LOCAL_WORKING_DIR \
    --extra-vars virthost=$UNDERCLOUD \
    --inventory $LOCAL_WORKING_DIR/hosts \
    --extra-vars tripleo_root=$TRIPLEO_ROOT \
    --extra-vars working_dir=$WORKING_DIR \
    --skip-tags "tripleo-validations,teardown-all" \
"

# --install-deps arguments installs deps and then quits, no other arguments are
# processed.
QUICKSTART_PREPARE_CMD="
    ./quickstart.sh
    --install-deps
"
# Bootstrap the quickstart virtualenv without cloning repos or running a
# real playbook (noop.yml), keeping the existing inventory.
QUICKSTART_VENV_CMD="
    ./quickstart.sh
    --bootstrap
    --no-clone
    --working-dir $LOCAL_WORKING_DIR
    --playbook noop.yml
    --retain-inventory
    $UNDERCLOUD
"
# Base ansible-playbook command for the install phase; playbook name and
# release args are appended by the caller of this command string.
QUICKSTART_INSTALL_CMD="
    $LOCAL_WORKING_DIR/bin/ansible-playbook
    --tags $TAGS
"

export QUICKSTART_DEFAULT_RELEASE_ARG="--extra-vars @$LOCAL_WORKING_DIR/config/release/tripleo-ci/${DISTRIBUTION:-CentOS}-${DISTRIBUTION_MAJOR_VERSION:-7}/$QUICKSTART_RELEASE.yml"

# Full collect-logs playbook invocation; consumed by create_collect_logs_script.
QUICKSTART_COLLECTLOGS_CMD="$LOCAL_WORKING_DIR/bin/ansible-playbook \
    $LOCAL_WORKING_DIR/playbooks/collect-logs.yml \
    -vv \
    $QUICKSTART_DEFAULT_RELEASE_ARG \
    $FEATURESET_CONF \
    $ENV_VARS \
    $EXTRA_VARS \
    $DEFAULT_ARGS \
    --extra-vars @$COLLECT_CONF \
    --extra-vars artcl_collect_dir=$LOGS_DIR \
    --tags all \
"

# Per-playbook release-argument overrides; populated from the
# emit-releases output below when that file exists.
declare -A RELEASE_ARGS=()
|
|
||||||
|
|
||||||
if [[ -f "$RELEASES_FILE_OUTPUT" ]]; then
|
|
||||||
|
|
||||||
source $RELEASES_FILE_OUTPUT
|
|
||||||
|
|
||||||
declare -A RELEASE_ARGS=(
|
|
||||||
["multinode-undercloud.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$UNDERCLOUD_INSTALL_RELEASE $UNDERCLOUD_INSTALL_HASH)
|
|
||||||
["multinode-undercloud-upgrade.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$UNDERCLOUD_TARGET_RELEASE $UNDERCLOUD_TARGET_HASH)
|
|
||||||
["multinode-overcloud-prep.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$OVERCLOUD_DEPLOY_RELEASE $OVERCLOUD_DEPLOY_HASH)
|
|
||||||
["multinode-overcloud.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$OVERCLOUD_DEPLOY_RELEASE $OVERCLOUD_DEPLOY_HASH)
|
|
||||||
["multinode-overcloud-update.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$OVERCLOUD_DEPLOY_RELEASE $OVERCLOUD_DEPLOY_HASH)
|
|
||||||
["multinode-overcloud-upgrade.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$OVERCLOUD_TARGET_RELEASE $OVERCLOUD_TARGET_HASH)
|
|
||||||
["multinode-validate.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$OVERCLOUD_TARGET_RELEASE $OVERCLOUD_TARGET_HASH)
|
|
||||||
["multinode-standalone.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$STANDALONE_DEPLOY_RELEASE $STANDALONE_DEPLOY_HASH \
|
|
||||||
$STANDALONE_DEPLOY_NEWEST_HASH)
|
|
||||||
["multinode-standalone-upgrade.yml"]=$(get_extra_vars_from_release \
|
|
||||||
$STANDALONE_TARGET_RELEASE $STANDALONE_TARGET_HASH \
|
|
||||||
$STANDALONE_TARGET_NEWEST_HASH)
|
|
||||||
)
|
|
||||||
|
|
||||||
fi
|
|
||||||
|
|
||||||
declare -A PLAYBOOKS_ARGS=(
|
|
||||||
["baremetal-full-overcloud.yml"]=" --extra-vars validation_args='--validation-errors-nonfatal' "
|
|
||||||
["multinode-overcloud.yml"]=" --extra-vars validation_args='--validation-errors-nonfatal' "
|
|
||||||
["multinode.yml"]=" --extra-vars validation_args='--validation-errors-nonfatal' "
|
|
||||||
)
|
|
||||||
|
|
||||||
mkdir -p $LOCAL_WORKING_DIR
|
|
||||||
# TODO(gcerami) parametrize hosts
|
|
||||||
cp $TRIPLEO_ROOT/tripleo-ci/toci-quickstart/config/testenv/${ENVIRONMENT}_hosts $LOCAL_WORKING_DIR/hosts
|
|
||||||
pushd $TRIPLEO_ROOT/tripleo-quickstart/
|
|
||||||
|
|
||||||
$QUICKSTART_PREPARE_CMD
|
|
||||||
$QUICKSTART_VENV_CMD
|
|
||||||
|
|
||||||
# Only ansible-playbook command will be used from this point forward, so we
|
|
||||||
# need some variables from quickstart.sh
|
|
||||||
OOOQ_DIR=$TRIPLEO_ROOT/tripleo-quickstart/
|
|
||||||
export OPT_WORKDIR=$LOCAL_WORKING_DIR
|
|
||||||
export ANSIBLE_CONFIG=$OOOQ_DIR/ansible.cfg
|
|
||||||
export ANSIBLE_COLLECTIONS_PATHS="$OPT_WORKDIR/share/ansible/collections:~/.ansible/collections:/usr/share/ansible/collections"
|
|
||||||
export ARA_DATABASE="sqlite:///${LOCAL_WORKING_DIR}/ara.sqlite"
|
|
||||||
export VIRTUAL_ENV_DISABLE_PROMPT=1
|
|
||||||
# Workaround for virtualenv issue https://github.com/pypa/virtualenv/issues/1029
|
|
||||||
set +u
|
|
||||||
source $LOCAL_WORKING_DIR/bin/activate
|
|
||||||
set -u
|
|
||||||
source $OOOQ_DIR/ansible_ssh_env.sh
|
|
||||||
[[ -n ${STATS_OOOQ:-''} ]] && export STATS_OOOQ=$(( $(date +%s) - STATS_OOOQ ))
|
|
||||||
|
|
||||||
# Debug step capture env variables
|
|
||||||
if [[ "$PLAYBOOK_DRY_RUN" == "1" ]]; then
|
|
||||||
echo "-- Capture Environment Variables Used ---------"
|
|
||||||
echo "$(env)" | tee -a $LOGS_DIR/toci_env_args_output.log
|
|
||||||
declare -p | tee -a $LOGS_DIR/toci_env_args_output.log
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "-- Playbooks Output --------------------------"
|
|
||||||
for playbook in $PLAYBOOKS; do
|
|
||||||
echo "$QUICKSTART_INSTALL_CMD \
|
|
||||||
${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
|
|
||||||
$NODES_ARGS \
|
|
||||||
$FEATURESET_CONF \
|
|
||||||
$ENV_VARS \
|
|
||||||
$EXTRA_VARS \
|
|
||||||
$VXLAN_VARS \
|
|
||||||
$DEFAULT_ARGS \
|
|
||||||
$LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-}" \
|
|
||||||
| sed 's/--/\n--/g' \
|
|
||||||
| tee -a $LOGS_DIR/playbook_executions.log
|
|
||||||
echo "# --------------------------------------- " \
|
|
||||||
| tee -a $LOGS_DIR/playbook_executions.log
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ "$PLAYBOOK_DRY_RUN" == "1" ]]; then
|
|
||||||
exit_value=0
|
|
||||||
else
|
|
||||||
## LOGS COLLECTION PREPARE
|
|
||||||
create_collect_logs_script
|
|
||||||
|
|
||||||
for playbook in $PLAYBOOKS; do
|
|
||||||
echo "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}"
|
|
||||||
run_with_timeout $START_JOB_TIME $QUICKSTART_INSTALL_CMD \
|
|
||||||
"${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
|
|
||||||
$NODES_ARGS \
|
|
||||||
$FEATURESET_CONF \
|
|
||||||
$ENV_VARS \
|
|
||||||
$EXTRA_VARS \
|
|
||||||
$VXLAN_VARS \
|
|
||||||
$DEFAULT_ARGS \
|
|
||||||
--extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
|
|
||||||
$LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
|
|
||||||
2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
|
|
||||||
|
|
||||||
# Print status of playbook run
|
|
||||||
[[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"
|
|
||||||
[[ "$exit_value" != 0 ]] && echo "Playbook run of $playbook failed" && break
|
|
||||||
done
|
|
||||||
|
|
||||||
[[ "$exit_value" == 0 ]] && echo "Playbook run passed successfully" || echo "Playbook run failed"
|
|
||||||
|
|
||||||
## LOGS COLLECTION RUN (if applicable)
|
|
||||||
if [[ "${NODEPOOL_PROVIDER:-''}" == "rdo-cloud"* ]] || \
|
|
||||||
[[ "${NODEPOOL_PROVIDER:-''}" == "vexxhost-nodepool-tripleo" ]] ; then
|
|
||||||
if [[ "$TOCI_JOBTYPE" =~ "ovb" ]]; then
|
|
||||||
bash $LOGS_DIR/collect_logs.sh
|
|
||||||
# rename script to not to run it in multinode jobs
|
|
||||||
mv $LOGS_DIR/collect_logs.sh $LOGS_DIR/ovb_collect_logs.sh
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
fi
|
|
||||||
|
|
||||||
popd
|
|
||||||
|
|
||||||
sudo unbound-control dump_cache > $LOGS_DIR/dns_cache.txt
|
|
||||||
|
|
||||||
if [[ "$PERIODIC" == 1 && -e $WORKSPACE/hash_info.sh ]] ; then
|
|
||||||
echo export JOB_EXIT_VALUE=$exit_value >> $WORKSPACE/hash_info.sh
|
|
||||||
fi
|
|
||||||
|
|
||||||
mkdir -p $LOGS_DIR/quickstart_files
|
|
||||||
find $LOCAL_WORKING_DIR -maxdepth 1 -type f -not -name "*sqlite" | while read i; do cp -l $i $LOGS_DIR/quickstart_files/$(basename $i); done
|
|
||||||
echo 'Quickstart completed.'
|
|
||||||
exit $exit_value
|
|
Loading…
Reference in New Issue