Adopt use of pre-commit linting

Follows the same configuration that was used on
tripleo-quickstart-extras and documented use on tripleo-docs.

Change-Id: Iba8a2db92137f9f6ad28f498627eb1b87039d99f
Story: https://tree.taiga.io/project/tripleo-ci-board/task/381
This commit is contained in:
Sorin Sbarnea 2018-12-06 11:11:42 +00:00
parent 40b50f763c
commit ed27a979d5
35 changed files with 396 additions and 294 deletions

18
.ansible-lint Normal file
View File

@ -0,0 +1,18 @@
# Ansible-lint configuration, following the setup used on
# tripleo-quickstart-extras (see tripleo-docs for usage).
exclude_paths:
  - roles/validate-ui/.travis.yml
parseable: true
rulesdir:
  - ./ci-scripts/ansible_rules/
quiet: false
skip_list:
  - ANSIBLE0006  # Using command rather than module: we have a few use cases
                 # where we need to use curl and rsync
  - ANSIBLE0007  # Using command rather than an argument to e.g. file:
                 # we have a lot of 'rm' commands and we should use the
                 # file module instead
  - ANSIBLE0010  # Package installs should not use latest.
                 # Sometimes we need to update some packages.
  - ANSIBLE0012  # Commands should not change things if nothing needs doing
  - ANSIBLE0013  # Use shell only when shell functionality is required
  - ANSIBLE0016  # Tasks that run when changed should likely be handlers;
                 # this requires refactoring roles, skipping for now
verbosity: 1

42
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,42 @@
---
# Pre-commit hook configuration; run via `pre-commit run --all-files`.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.0.0
    hooks:
      - id: trailing-whitespace
      - id: mixed-line-ending
      - id: check-byte-order-marker
      - id: check-executables-have-shebangs
      - id: check-merge-conflict
      - id: debug-statements
      - id: flake8
        additional_dependencies:
          - hacking<1.2.0,>=1.1.0
      - id: check-yaml
        files: .*\.(yaml|yml)$
  # commented to allow progressive enablement in smaller patches
  # - repo: https://github.com/adrienverge/yamllint.git
  #   rev: v1.13.0
  #   hooks:
  #     - id: yamllint
  #       files: \.(yaml|yml)$
  #       types: [file, yaml]
  #       entry: yamllint --strict -f parsable
  - repo: https://github.com/ansible/ansible-lint
    rev: v3.5.1
    hooks:
      - id: ansible-lint
        files: \.(yaml|yml)$
        entry: ansible-lint --force-color -v
  - repo: https://github.com/openstack-dev/bashate.git
    rev: 0.6.0
    hooks:
      - id: bashate
        entry: bashate --error . --verbose --ignore=E006,E040
        # Run bashate check for all bash scripts
        # Ignores the following rules:
        # E006: Line longer than 79 columns (as many scripts use jinja
        #       templating, this is very difficult)
        # E040: Syntax error determined using `bash -n` (as many scripts
        #       use jinja templating, this will often fail and the syntax
        #       error will be discovered in execution anyway)

6
.yamllint Normal file
View File

@ -0,0 +1,6 @@
---
# yamllint configuration: default ruleset with a relaxed line length,
# since many templated YAML files exceed the default 80-column limit.
extends: default
rules:
  line-length:
    max: 180

View File

@ -151,16 +151,16 @@ done
for playbook in {{ " ".join(playbooks) }}; do
echo ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}
$QUICKSTART_INSTALL_CMD \
${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
{{ nodes_args }} \
{{ featureset_conf }} \
{{ env_vars }} \
{{ extra_vars }} \
{{ vxlan_vars }} \
$DEFAULT_ARGS \
--extra-vars @{{ workspace }}/logs/zuul-variables.yaml \
$LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-} \
2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
{{ nodes_args }} \
{{ featureset_conf }} \
{{ env_vars }} \
{{ extra_vars }} \
{{ vxlan_vars }} \
$DEFAULT_ARGS \
--extra-vars @{{ workspace }}/logs/zuul-variables.yaml \
$LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-} \
2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
# Print status of playbook run
[[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"

View File

@ -27,7 +27,7 @@ USER=centos
# makes some assumptions but good enough for now
nova keypair-add --pub-key ~/.ssh/id_rsa.pub bighammer || true
function tapper(){
function tapper {
set -x
NODENAME=test-node-$1
@ -35,8 +35,8 @@ function tapper(){
#trap "nova delete $NODENAME" RETURN ERR
sleep 60
if [ "$(nova show $NODENAME | awk '/status/ {print $4}')" != "ACTIVE" ] ; then
nova show $NODENAME
return 1
nova show $NODENAME
return 1
fi
IP=$(nova show $NODENAME | awk '/private network/ {print $5}')

View File

@ -51,30 +51,30 @@ except:
export ELEMENTS_PATH="${COMMON_ELEMENTS_PATH}:/usr/share/instack-undercloud:/usr/share/tripleo-image-elements:/usr/share/tripleo-puppet-elements"
ELEMENTS=$(\
tripleo-build-images \
--image-json-output \
--image-name overcloud-full \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
| jq '. | .[0].elements | map(.+" ") | add' \
| sed 's/"//g')
--image-json-output \
--image-name overcloud-full \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
| jq '. | .[0].elements | map(.+" ") | add' \
| sed 's/"//g')
# delorean-repo is excluded b/c we've already run --repo-setup on this node and
# we don't want to overwrite that.
sudo -E instack \
-e centos7 \
enable-packages-install \
install-types \
$ELEMENTS \
-k extra-data \
pre-install \
install \
post-install \
-b 05-fstab-rootfs-label \
00-fix-requiretty \
90-rebuild-ramdisk \
00-usr-local-bin-secure-path \
-x delorean-repo \
-d
-e centos7 \
enable-packages-install \
install-types \
$ELEMENTS \
-k extra-data \
pre-install \
install \
post-install \
-b 05-fstab-rootfs-label \
00-fix-requiretty \
90-rebuild-ramdisk \
00-usr-local-bin-secure-path \
-x delorean-repo \
-d
# In the imported elements we have remove-machine-id. In multinode
# jobs that could mean we end up without /etc/machine-id. Make sure
@ -83,12 +83,12 @@ sudo -E instack \
PACKAGES=$(\
tripleo-build-images \
--image-json-output \
--image-name overcloud-full \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
| jq '. | .[0].packages | .[] | tostring' \
| sed 's/"//g')
--image-json-output \
--image-name overcloud-full \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
--image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
| jq '. | .[0].packages | .[] | tostring' \
| sed 's/"//g')
# Install additional packages expected by the image
sudo yum -y install $PACKAGES

View File

@ -1,3 +1,4 @@
#!/bin/bash
# Tripleo CI functions
# Revert a commit for tripleo ci
@ -5,7 +6,7 @@
# $2 : hash id of commit to revert
# $3 : bug id of reason for revert (used to skip revert if found in commit
# that triggers ci).
function temprevert(){
function temprevert {
# Before reverting check to ensure this isn't the related fix
if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
echo "Skipping temprevert because bug fix $3 was found in git message."
@ -24,7 +25,7 @@ function temprevert(){
# $2 : hash id of commit to pin too
# $3 : bug id of reason for the pin (used to skip revert if found in commit
# that triggers ci).
function pin(){
function pin {
# Before reverting check to ensure this isn't the related fix
if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
echo "Skipping pin because bug fix $3 was found in git message."
@ -42,7 +43,7 @@ function pin(){
# $2 : Gerrit refspec to cherry pick
# $3 : bug id of reason for the cherry pick (used to skip cherry pick if found
# in commit that triggers ci).
function cherrypick(){
function cherrypick {
local PROJ_NAME=$1
local REFSPEC=$2
@ -66,14 +67,14 @@ function cherrypick(){
# echo's out a project name from a ref
# $1 : e.g. openstack/nova:master:refs/changes/87/64787/3 returns nova
function filterref(){
function filterref {
PROJ=${1%%:*}
PROJ=${PROJ##*/}
echo $PROJ
}
# Mount a qcow image, copy in the delorean repositories and update the packages
function update_image(){
function update_image {
IMAGE=$1
MOUNTDIR=$(mktemp -d)
case ${IMAGE##*.} in
@ -133,7 +134,7 @@ function update_image(){
# Decide if a particular cached artifact can be used in this CI test
# Takes a single argument representing the name of the artifact being checked.
function canusecache(){
function canusecache {
# If we are uploading to the cache then we shouldn't use it
[ "$CACHEUPLOAD" == 1 ] && return 1
@ -165,7 +166,7 @@ function canusecache(){
return 0
}
function extract_logs(){
function extract_logs {
local name=$1
mkdir -p $WORKSPACE/logs/$name
local logs_tar="$WORKSPACE/logs/$name.tar.xz"
@ -178,7 +179,7 @@ function extract_logs(){
fi
}
function postci(){
function postci {
local exit_val=${1:-0}
set -x
set +e
@ -368,10 +369,10 @@ function echo_vars_to_deploy_env {
}
function stop_dstat {
ps axjf | grep bin/dstat | grep -v grep | awk '{print $2;}' | sudo xargs -t -n 1 -r kill
ps axjf | grep bin/dstat | grep -v grep | awk '{print $2;}' | sudo xargs -t -n 1 -r kill
}
function item_in_array () {
function item_in_array {
local item
for item in "${@:2}"; do
if [[ "$item" == "$1" ]]; then

View File

@ -1,3 +1,4 @@
#!/bin/bash
# Periodic stable jobs set OVERRIDE_ZUUL_BRANCH, gate stable jobs
# just have the branch they're proposed to, e.g ZUUL_BRANCH, in both
# cases we need to set STABLE_RELEASE to match for tripleo.sh

View File

@ -4,11 +4,11 @@ from __future__ import print_function
import argparse
import difflib
import json
import requests
import os
import requests
from colorama import init
from colorama import Fore
from colorama import init
GERRIT_DETAIL_API = "https://review.openstack.org/changes/{}/detail"
GERRIT_USER_NAME = "zuul"
@ -16,8 +16,9 @@ ZUUL_PIPELINE = "check"
def parse_ci_message(message):
"""Convert zuul's gerrit message into a dict with job name as key and
job url as value
"""Convert zuul's gerrit message into a dict
Dictionary contains job name as key and job url as value
"""
jobs = {}
@ -29,8 +30,7 @@ def parse_ci_message(message):
def get_file(logs_url, file):
"""Download a file from logs server for this job
"""
"""Download a file from logs server for this job"""
response = requests.get(logs_url + '/logs/' + file)
if response.ok:
@ -39,8 +39,7 @@ def get_file(logs_url, file):
def get_last_jobs(change):
"""Get the last CI jobs execution at check pipeline for this review
"""
"""Get the last CI jobs execution at check pipeline for this review"""
last_jobs = {}
detail_url = GERRIT_DETAIL_API.format(change)
@ -62,8 +61,9 @@ def get_last_jobs(change):
def download(jobs, file_path):
"""Download a file from all the specified jobs and return them as a
dictionary with job name as key and file content as value
"""Download a file from all the specified jobs
Return them as a dictionary with job name as key and file content as value
"""
downloaded_files = {}
for job, logs in jobs.iteritems():
@ -76,9 +76,7 @@ def download(jobs, file_path):
def is_equal(lho_jobs, rho_jobs, file_path):
"""Check the differences of file_path between the lho and rho job sets and
print out them
"""
"""Prints differences of file_path between the lho and rho job sets"""
lho_files = download(lho_jobs, file_path)
rho_files = download(rho_jobs, file_path)

View File

@ -11,9 +11,9 @@ echo puppetlabs-apache adrien-filemapper | xargs -n 1 puppet module install
git clone https://github.com/puppetlabs/puppetlabs-vcsrepo.git /etc/puppet/modules/vcsrepo
if [ -e /sys/class/net/eth1 ] ; then
echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
ifdown eth1
ifup eth1
echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
ifdown eth1
ifup eth1
fi
CIREPO=/opt/stack/tripleo-ci

View File

@ -1,3 +1,4 @@
#!/bin/bash
set -eux
set -o pipefail
@ -305,9 +306,9 @@ if [ "$OSINFRA" = "0" ]; then
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.register.nodes.seconds"
if [ $INTROSPECT == 1 ] ; then
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
$TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --introspect-nodes
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
$TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --introspect-nodes
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
fi
if [ $PREDICTABLE_PLACEMENT == 1 ]; then
@ -451,7 +452,7 @@ if [ "$MULTINODE" == 0 ] && [ "$OVERCLOUD" == 1 ] ; then
echo "crm_resource for openstack-heat-engine has failed!"
exit $exitcode
}
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
fi
fi
@ -464,10 +465,10 @@ if [ "$OVERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
# and thus the contents of delorean-ci may contain packages
# we want to test for the current branch on upgrade
if [ -s /etc/nodepool/sub_nodes_private ]; then
for ip in $(cat /etc/nodepool/sub_nodes_private); do
ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
sudo sed -i -e \"s/enabled=0/enabled=1/\" /etc/yum.repos.d/delorean-ci.repo
done
for ip in $(cat /etc/nodepool/sub_nodes_private); do
ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
sudo sed -i -e \"s/enabled=0/enabled=1/\" /etc/yum.repos.d/delorean-ci.repo
done
fi
source ~/stackrc

View File

@ -6,12 +6,12 @@ set -o pipefail
TMPFILE=$(mktemp)
TMP2FILE=$(mktemp)
function heat_resource_metadata() {
# Build os-collect-config command line arguments for the given heat
# resource, which when run, allow us to collect the heat completion
# signals.
heat resource-metadata overcloud $1 | jq '.["os-collect-config"]["cfn"]' | grep \" | tr -d '\n' | sed -e 's/"//g' -e 's/_/-/g' -e 's/: / /g' -e 's/, / --cfn-/g' -e 's/^ /--cfn-/' -e 's/$/ --print/'
echo
function heat_resource_metadata {
# Build os-collect-config command line arguments for the given heat
# resource, which when run, allow us to collect the heat completion
# signals.
heat resource-metadata overcloud $1 | jq '.["os-collect-config"]["cfn"]' | grep \" | tr -d '\n' | sed -e 's/"//g' -e 's/_/-/g' -e 's/: / /g' -e 's/, / --cfn-/g' -e 's/^ /--cfn-/' -e 's/$/ --print/'
echo
}
>$TMPFILE

View File

@ -1,36 +1,36 @@
#!/bin/bash
set -eu -o pipefail
function usage(){
echo "Helper script for downloading tripleo-ci jobs logs"
echo
echo "Example:"
echo "getthelogs http://logs.openstack.org/00/123456/7/check/gate-tripleo-ci-foo/d3adbeef"
echo
echo "Downloads the logs and starts a shell from the logs root directory"
function usage {
echo "Helper script for downloading tripleo-ci jobs logs"
echo
echo "Example:"
echo "getthelogs http://logs.openstack.org/00/123456/7/check/gate-tripleo-ci-foo/d3adbeef"
echo
echo "Downloads the logs and starts a shell from the logs root directory"
}
function finish(){
rc=${rc:-$?}
trap - EXIT
cd $TDIR/../
echo "Download job exited ${rc}"
PS1="JOBLOGS ]\$ " bash --noprofile --norc
function finish {
rc=${rc:-$?}
trap - EXIT
cd $TDIR/../
echo "Download job exited ${rc}"
PS1="JOBLOGS ]\$ " bash --noprofile --norc
}
function get_dirs(){
local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
local directories=""
directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")
if [ -n "$directories" ]; then
for d in $directories; do
directories="$directories $(get_dirs $d/)"
done
echo $directories
else
echo ""
fi
return 0
function get_dirs {
local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
local directories=""
directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")
if [ -n "$directories" ]; then
for d in $directories; do
directories="$directories $(get_dirs $d/)"
done
echo $directories
else
echo ""
fi
return 0
}
[[ "${1:--}" =~ ^\s+?- ]] && (usage; exit 1)
@ -42,12 +42,12 @@ BASEURL=${1%/}
SC=$(dirname $BASEURL | grep -o \/ | wc -w)
if [[ $BASEURL =~ 'logs.rdoproject' && SC -le 9 ]] ||\
[[ $BASEURL =~ 'logs.rdoproject.org/openstack-periodic' && SC -le 5 ]]; then
console="$BASEURL/console.txt.gz"
console="$BASEURL/console.txt.gz"
elif [[ ! $(basename $BASEURL) == 'logs' && SC -le 7 ]]; then
console="$BASEURL/job-output.txt.gz"
BASEURL=${BASEURL}/logs
console="$BASEURL/job-output.txt.gz"
BASEURL=${BASEURL}/logs
else
console=''
console=''
fi
TDIR=${BASEURL##*http://}
TDIR=${TDIR##*https://}
@ -59,18 +59,18 @@ echo "Target dir for download: $TDIR"
echo Will download logs from the following URLs:
list_to_get="$console $(get_dirs $BASEURL/)"
for d in $list_to_get; do
echo $d
echo $d
done
rm -f wget-jobs.txt
for d in $list_to_get; do
args="\"-nv -nc --no-use-server-timestamps \
--accept-regex='\.txt\.gz$|messages$' \
--reject='index.html*' \
--recursive -l 10 --domains logs.openstack.org,logs.rdoproject.org \
--no-parent \
-erobots=off --wait 0.25 ${d}\""
echo "${args}" >> wget-jobs.txt
args="\"-nv -nc --no-use-server-timestamps \
--accept-regex='\.txt\.gz$|messages$' \
--reject='index.html*' \
--recursive -l 10 --domains logs.openstack.org,logs.rdoproject.org \
--no-parent \
-erobots=off --wait 0.25 ${d}\""
echo "${args}" >> wget-jobs.txt
done
cat wget-jobs.txt | sed -n '{p;p}' | shuf > wget-jobs-shuf.txt

View File

@ -1,3 +1,4 @@
#!/bin/bash
export METRICS_START_TIMES=/tmp/metric-start-times
export METRICS_DATA_FILE=/tmp/metrics-data
@ -17,9 +18,12 @@ function record_metric {
# called. NOTE: time metrics names must be unique.
function start_metric {
local NAME=$1
local START_TIME=$(date +%s)
local METRIC_NAME
local START_TIME
START_TIME=$(date +%s)
# we use : as our delimiter so convert to _. Also convert spaces and /'s.
local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
if grep -c "^$METRIC_NAME:" $METRICS_START_TIMES &>/dev/null; then
echo "start_metric has already been called for $NAME" >&2
@ -33,18 +37,23 @@ function start_metric {
# The total time (in seconds) is calculated and logged to the metrics
# data file. NOTE: the end time is used as the DTS.
function stop_metric {
local END_TIME
local LINE
local METRIC_NAME
local NAME=$1
local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
local END_TIME=$(date +%s)
local START_TIME
local TOTAL_TIME
METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
END_TIME=$(date +%s)
if ! grep -c "^$METRIC_NAME" $METRICS_START_TIMES &>/dev/null; then
echo "Please call start_metric before calling stop_metric for $NAME" >&2
exit 1
fi
local LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
local START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
local TOTAL_TIME="$(($END_TIME - $START_TIME))"
LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
TOTAL_TIME="$(($END_TIME - $START_TIME))"
record_metric "$METRIC_NAME" "$TOTAL_TIME" "$END_TIME"
}
function metrics_to_graphite {

View File

@ -6,31 +6,31 @@ MIRRORURL="https://images.rdoproject.org/${RELEASE}/delorean/current-tripleo"
IMAGES="overcloud-full.tar ironic-python-agent.tar"
function check_new_image {
local img=$1
wget ${MIRRORURL}/${img}.md5 -O test_md5 -o /dev/null || {
echo "File ${MIRRORURL}/${img}.md5 doesn't present, can NOT continue"
exit 1
}
diff -q test_md5 ${img}.md5 >/dev/null
local img=$1
wget ${MIRRORURL}/${img}.md5 -O test_md5 -o /dev/null || {
echo "File ${MIRRORURL}/${img}.md5 doesn't present, can NOT continue"
exit 1
}
diff -q test_md5 ${img}.md5 >/dev/null
}
function update_images {
for img in $IMAGES; do
wget ${MIRRORURL}/${img} -O ${img}-${RELEASE}
wget ${MIRRORURL}/${img}.md5 -O ${img}-${RELEASE}.md5
down_md5="$(cat ${img}-${RELEASE}.md5 | awk {'print $1'})"
real_md5="$(md5sum ${img}-${RELEASE} | awk {'print $1'})"
if [[ "$down_md5" == "$real_md5" ]]; then
mv -f ${img}-${RELEASE} ${img}
mv -f ${img}-${RELEASE}.md5 ${img}.md5
else
echo "md5 doesn't match, image download was broken!"
echo "Calculated md5 is $real_md5 and downloaded is $down_md5"
rm -f "${img}-${RELEASE}"
rm -f "${img}-${RELEASE}.md5"
fi
done
wget ${MIRRORURL}/delorean_hash.txt -O delorean_hash.txt -o /dev/null
for img in $IMAGES; do
wget ${MIRRORURL}/${img} -O ${img}-${RELEASE}
wget ${MIRRORURL}/${img}.md5 -O ${img}-${RELEASE}.md5
down_md5="$(cat ${img}-${RELEASE}.md5 | awk {'print $1'})"
real_md5="$(md5sum ${img}-${RELEASE} | awk {'print $1'})"
if [[ "$down_md5" == "$real_md5" ]]; then
mv -f ${img}-${RELEASE} ${img}
mv -f ${img}-${RELEASE}.md5 ${img}.md5
else
echo "md5 doesn't match, image download was broken!"
echo "Calculated md5 is $real_md5 and downloaded is $down_md5"
rm -f "${img}-${RELEASE}"
rm -f "${img}-${RELEASE}.md5"
fi
done
wget ${MIRRORURL}/delorean_hash.txt -O delorean_hash.txt -o /dev/null
}
mkdir -p $BUILDS

View File

@ -1,16 +1,18 @@
#!/bin/python3.4
from builtins import FileExistsError
import cgi
import cgitb
import fcntl
import os
import shutil
import sys
import tempfile
basedir="/var/www/html/"
basedir = "/var/www/html/"
print("Content-Type: text/html\n")
def saveform(form, storagedir):
for key in form.keys():
entry = form[key]
@ -36,6 +38,7 @@ def saveform(form, storagedir):
fp.write(line)
fp.close()
def run():
if not os.environ.get("REMOTE_ADDR", "").startswith("192.168."):
@ -73,5 +76,5 @@ def run():
fcntl.lockf(fd, fcntl.LOCK_UN)
os.close(fd)
sys.exit(run())
sys.exit(run())

View File

@ -65,7 +65,7 @@ function is_featureset {
local type="${1}"
local featureset_file="${2}"
[ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]
[[ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]]
}
function run_with_timeout {

View File

@ -21,7 +21,7 @@ set -x
# NOTE(bnemec): This function starts the port deletions in the background.
# To ensure they complete before you proceed, you must call "wait" after
# calling this function.
function delete_ports() {
function delete_ports {
local subnetid=${1:-}
if [ -z "$subnetid" ]; then
return
@ -37,8 +37,7 @@ CONSOLE_LOG_PATH=/var/www/html/tebroker/console-logs/
nova console-log bmc-${ENVNUM} | tail -n 100 | awk -v envnum="$ENVNUM" '$0=envnum ": " $0' >> /var/log/bmc-console-logs
# Save all the consoles in the stack to a dedicated directory, stripping out ANSI color codes.
for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM
do
for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM ; do
openstack console log show $server | sed 's/\[[0-9;]*[a-zA-Z]//g' | gzip > $CONSOLE_LOG_PATH/$server-console.log.gz || true
done
@ -55,7 +54,7 @@ wait
# If there was a keypair for this specific run, delete it.
openstack keypair delete "tripleo-ci-key-$ENVNUM" || true
function delete_stack() {
function delete_stack {
local stackname=$1
# Nothing to do if the specified stack doesn't exist
if ! heat stack-show $stackname; then

View File

@ -23,8 +23,8 @@ import json
import logging
import logging.handlers
import os
import sys
import subprocess
import sys
import tempfile
import threading
import time
@ -35,9 +35,11 @@ from novaclient import client as novaclient
from novaclient import exceptions
# 100Mb log files
maxBytes=1024*1024*100
maxBytes = 1024*1024*100
logging.basicConfig(filename="/var/www/html/tebroker/testenv-worker.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(
filename="/var/www/html/tebroker/testenv-worker.log",
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class CallbackClient(gear.Client):
@ -86,7 +88,7 @@ class TEWorkerThread(threading.Thread):
self.runJob()
except gear.InterruptedError:
logger.info('getJob interrupted...')
except:
except Exception:
logger.exception('Error while run_te_worker worker')
self.running = False
@ -132,17 +134,18 @@ class TEWorkerThread(threading.Thread):
with tempfile.NamedTemporaryFile('r') as fp:
os.environ["TE_DATAFILE"] = fp.name
logger.info(
subprocess.check_output([self.scriptfiles[0],
self.num,
arguments.get("envsize","2"),
arguments.get("ucinstance",""),
arguments.get("create_undercloud", ""),
arguments.get("ssh_key", ""),
arguments.get("net_iso", "multi-nic"),
arguments.get("compute_envsize","0"),
arguments.get("extra_nodes", "0"),
],
stderr=subprocess.STDOUT))
subprocess.check_output([
self.scriptfiles[0],
self.num,
arguments.get("envsize", "2"),
arguments.get("ucinstance", ""),
arguments.get("create_undercloud", ""),
arguments.get("ssh_key", ""),
arguments.get("net_iso", "multi-nic"),
arguments.get("compute_envsize", "0"),
arguments.get("extra_nodes", "0"),
],
stderr=subprocess.STDOUT))
clientdata = fp.read()
except subprocess.CalledProcessError as e:
logger.error(e.output)
@ -164,7 +167,8 @@ class TEWorkerThread(threading.Thread):
if not cb_job.running:
logger.error("No sign of the Callback job starting,"
"assuming its no longer present")
clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
clientdata = subprocess.check_output(
[self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
logger.info(clientdata)
client.shutdown()
return
@ -182,7 +186,8 @@ class TEWorkerThread(threading.Thread):
else:
logger.info('Returned from Job : %s', cb_job.data)
try:
clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
clientdata = subprocess.check_output(
[self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.error(e.output)
raise
@ -238,7 +243,7 @@ def _check_instance_alive(nclient, instance, event):
"""
if instance:
try:
i = nclient.servers.get(instance)
nclient.servers.get(instance)
except exceptions.NotFound:
# There is a very brief period of time where instance could be set
# and event not. It's unlikely to happen, but let's be safe.
@ -254,8 +259,10 @@ def main(args=sys.argv[1:]):
'"locked" state while it calls back to the client. The '
'clients job is provided with data (contents of datafile)'
)
parser.add_argument('scriptfiles', nargs=2,
help='Path to a script whos output is provided to the client')
parser.add_argument(
'scriptfiles',
nargs=2,
help='Path to a script whos output is provided to the client')
parser.add_argument('--timeout', '-t', type=int, default=10800,
help='The maximum number of seconds to hold the '
'testenv for, can be overridden by the client.')
@ -271,7 +278,10 @@ def main(args=sys.argv[1:]):
global logger
logger = logging.getLogger('testenv-worker-' + opts.tenum)
logger.addHandler(logging.handlers.RotatingFileHandler("/var/www/html/tebroker/testenv-worker.log", maxBytes=maxBytes, backupCount=5))
logger.addHandler(logging.handlers.RotatingFileHandler(
"/var/www/html/tebroker/testenv-worker.log",
maxBytes=maxBytes,
backupCount=5))
logger.setLevel(logging.INFO)
logger.removeHandler(logger.handlers[0])
@ -279,7 +289,11 @@ def main(args=sys.argv[1:]):
logger.setLevel(logging.DEBUG)
logger.info('Starting test-env worker with data %r', opts.scriptfiles)
te_worker = TEWorkerThread(opts.geard, opts.tenum, opts.timeout, opts.scriptfiles)
te_worker = TEWorkerThread(
opts.geard,
opts.tenum,
opts.timeout,
opts.scriptfiles)
te_worker.start()

View File

@ -9,8 +9,7 @@ function set_env {
# The updates job already takes a long time, always use cache for it
[[ "$TOCI_JOBTYPE" =~ updates ]] && set_env "false"
# There are some projects that require images building
for PROJFULLREF in ${ZUUL_CHANGES//^/ };
do
for PROJFULLREF in ${ZUUL_CHANGES//^/ }; do
PROJ=${PROJFULLREF%%:*};
PROJ=${PROJ##*/};
[[ "$PROJ" =~ diskimage-builder|tripleo-image-elements|tripleo-puppet-elements|instack-undercloud|python-tripleoclient|tripleo-common ]] && set_env "true"

View File

@ -167,11 +167,11 @@ NODEPOOL_RDO_PROXY=${NODEPOOL_RDO_PROXY:-https://trunk.rdoproject.org}
NODEPOOL_BUILDLOGS_CENTOS_PROXY="${NODEPOOL_BUILDLOGS_CENTOS_PROXY:-https://buildlogs.centos.org}"
NODEPOOL_CBS_CENTOS_PROXY="${NODEPOOL_CBS_CENTOS_PROXY:-https://cbs.centos.org/repos}"
OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
$REPO_PREFIX/$CEPH_REPO_FILE"
$REPO_PREFIX/$CEPH_REPO_FILE"
OPSTOOLS_REPO_ENABLED=${OPSTOOLS_REPO_ENABLED:-"0"}
if [[ "${OPSTOOLS_REPO_ENABLED}" = 1 ]]; then
OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
$REPO_PREFIX/centos-opstools.repo"
OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
$REPO_PREFIX/centos-opstools.repo"
fi
FEATURE_BRANCH=${FEATURE_BRANCH:-}
DELOREAN_SETUP=${DELOREAN_SETUP:-""}
@ -250,7 +250,11 @@ function log {
}
function source_rc {
if [ $1 = "stackrc" ] ; then cloud="Undercloud"; else cloud="Overcloud"; fi
if [ $1 = "stackrc" ]; then
cloud="Undercloud"
else
cloud="Overcloud"
fi
echo "You must source a $1 file for the $cloud."
echo "Attempting to source $HOME/$1"
source $HOME/$1
@ -665,8 +669,7 @@ function overcloud_deploy {
exitval=0
log "Deploy command arguments: $OVERCLOUD_DEPLOY_ARGS"
openstack overcloud deploy $OVERCLOUD_DEPLOY_ARGS || exitval=1
if [ $exitval -eq 1 ];
then
if [ $exitval -eq 1 ]; then
log "Overcloud create - FAILED!"
exit 1
fi
@ -713,8 +716,7 @@ function overcloud_update {
log "Overcloud update started."
exitval=0
openstack overcloud deploy $OVERCLOUD_UPDATE_ARGS || exitval=1
if [ $exitval -eq 1 ];
then
if [ $exitval -eq 1 ]; then
log "Overcloud update - FAILED!"
exit 1
fi

View File

@ -30,7 +30,7 @@ and Lauchpad API connections) and generate the CI reports which contact
the API of the upstream Jenkins servers.
If you want to do a quick build to test out new HTML formatting, etc. you
can disable the reviewday and CI reports by running the following:
can disable the reviewday and CI reports by running the following:
cd tripleo-ci/scripts/website
SKIP\_REVIEWDAY="Y" SKIP\_CI\_REPORTS="Y" OUT\_HTML='out\_html' bash generate\_site.sh

View File

@ -24,32 +24,32 @@ SKIP_BLOG=${SKIP_BLOG:-''}
# TRIPLEO-DOCS
if [ ! -d tripleo-docs ]; then
git clone git://git.openstack.org/openstack/tripleo-docs
pushd tripleo-docs
tox -edocs #initial run
popd
git clone git://git.openstack.org/openstack/tripleo-docs
pushd tripleo-docs
tox -edocs #initial run
popd
else
pushd tripleo-docs
git reset --hard origin/master
git pull
# NOTE(bnemec): We need to rebuild this venv each time or changes to
# tripleosphinx won't be picked up.
tox -re docs
popd
pushd tripleo-docs
git reset --hard origin/master
git pull
# NOTE(bnemec): We need to rebuild this venv each time or changes to
# tripleosphinx won't be picked up.
tox -re docs
popd
fi
# TRIPLEO SPHINX
if [ ! -d tripleosphinx ]; then
git clone https://github.com/dprince/tripleosphinx.git
pushd tripleosphinx
tox -edocs #creates the blank.html
popd
git clone https://github.com/dprince/tripleosphinx.git
pushd tripleosphinx
tox -edocs #creates the blank.html
popd
else
pushd tripleosphinx
git reset --hard origin/master
git pull
tox -edocs #creates the blank.html
popd
pushd tripleosphinx
git reset --hard origin/master
git pull
tox -edocs #creates the blank.html
popd
fi
# swap in custom tripleosphinx
@ -60,33 +60,33 @@ popd
#REVIEWDAY
if [ ! -d reviewday ]; then
git clone git://git.openstack.org/openstack-infra/reviewday
git clone git://git.openstack.org/openstack-infra/reviewday
else
pushd reviewday
git reset --hard origin/master
git pull
popd
pushd reviewday
git reset --hard origin/master
git pull
popd
fi
#TRIPLEO CI
if [ ! -d tripleo-ci ]; then
git clone git://git.openstack.org/openstack-infra/tripleo-ci
git clone git://git.openstack.org/openstack-infra/tripleo-ci
else
pushd tripleo-ci
git reset --hard origin/master
git pull
popd
pushd tripleo-ci
git reset --hard origin/master
git pull
popd
fi
#Planet (Blog Feed Aggregator)
PLANET_DIR='planet-venus'
if [ ! -d '$PLANET_DIR' ]; then
git clone https://github.com/rubys/venus.git $PLANET_DIR
git clone https://github.com/rubys/venus.git $PLANET_DIR
else
pushd $PLANET_DIR
git reset --hard origin/master
git pull
popd
pushd $PLANET_DIR
git reset --hard origin/master
git pull
popd
fi
#-----------------------------------------
@ -104,54 +104,54 @@ $SUDO_CP mkdir -p $OUT_HTML
# Reviewday
if [ -z "$SKIP_REVIEWDAY" ]; then
pushd reviewday
tox -erun -- "-p $REVIEWDAY_INPUT_FILE"
$SUDO_CP cp -a arrow* out_report/*.png out_report/*.js out_report/*.css $OUT_HTML
DATA=$(cat out_report/data_table.html)
popd
OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/reviews.html
TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
echo "<h1>TripleO Reviews</h1>" >> $OUT_FILE
sed -e "s|<title>.*|<title>TripleO: Reviews</title>|" -i $OUT_FILE # custom title
sed -e "s|<title>.*|<title>TripleO: Reviews</title><meta name='description' content='OpenStack Deployment Program Reviews'/>|" -i $OUT_FILE # custom title
echo "$DATA" >> $OUT_FILE
sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
pushd reviewday
tox -erun -- "-p $REVIEWDAY_INPUT_FILE"
$SUDO_CP cp -a arrow* out_report/*.png out_report/*.js out_report/*.css $OUT_HTML
DATA=$(cat out_report/data_table.html)
popd
OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/reviews.html
TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
echo "<h1>TripleO Reviews</h1>" >> $OUT_FILE
sed -e "s|<title>.*|<title>TripleO: Reviews</title>|" -i $OUT_FILE # custom title
sed -e "s|<title>.*|<title>TripleO: Reviews</title><meta name='description' content='OpenStack Deployment Program Reviews'/>|" -i $OUT_FILE # custom title
echo "$DATA" >> $OUT_FILE
sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
fi
# TripleO CI
if [ -z "$SKIP_CI_REPORTS" ]; then
pushd tripleo-ci
pushd tripleo-ci
# jobs report
tox -ecireport -- -b '^.*'
DATA=$(cat tripleo-jobs.html-table)
popd
OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/cistatus.html
TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
echo "<h1>TripleO CI Status</h1>" >> $OUT_FILE
sed -e "s|<title>.*|<title>TripleO: CI Status</title><meta name='description' content='OpenStack Deployment Program CI Status results'/>|" -i $OUT_FILE # custom title
echo "$DATA" >> $OUT_FILE
sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
# jobs report
tox -ecireport -- -b '^.*'
DATA=$(cat tripleo-jobs.html-table)
popd
OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/cistatus.html
TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
echo "<h1>TripleO CI Status</h1>" >> $OUT_FILE
sed -e "s|<title>.*|<title>TripleO: CI Status</title><meta name='description' content='OpenStack Deployment Program CI Status results'/>|" -i $OUT_FILE # custom title
echo "$DATA" >> $OUT_FILE
sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
fi
# Planet
if [ -z "$SKIP_BLOG" ]; then
cp $SCRIPT_DIR/tripleo-ci/scripts/website/planet* $SCRIPT_DIR/$PLANET_DIR
pushd $SCRIPT_DIR/$PLANET_DIR
mkdir output
rm planet.html.tmplc # cleanup from previous runs
python planet.py planet.config.ini
popd
DATA=$(cat $PLANET_DIR/output/planet.html)
OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/planet.html
TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
echo "<h1>Planet TripleO</h1>" >> $OUT_FILE
sed -e "s|<title>.*|<title>Planet TripleO</title><meta name='description' content='OpenStack Deployment Program Planet'/>|" -i $OUT_FILE # custom title
echo "$DATA" >> $OUT_FILE
sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
cp $SCRIPT_DIR/tripleo-ci/scripts/website/planet* $SCRIPT_DIR/$PLANET_DIR
pushd $SCRIPT_DIR/$PLANET_DIR
mkdir output
rm planet.html.tmplc # cleanup from previous runs
python planet.py planet.config.ini
popd
DATA=$(cat $PLANET_DIR/output/planet.html)
OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/planet.html
TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
echo "<h1>Planet TripleO</h1>" >> $OUT_FILE
sed -e "s|<title>.*|<title>Planet TripleO</title><meta name='description' content='OpenStack Deployment Program Planet'/>|" -i $OUT_FILE # custom title
echo "$DATA" >> $OUT_FILE
sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
fi
# Copy in the new web pages

View File

@ -1,5 +1,6 @@
---
parameter_defaults:
ControlPlaneSubnetCidr: "24"
ControlPlaneDefaultRoute: 192.168.24.1
EC2MetadataIp: 192.168.24.1
DnsServers: ["8.8.8.8","8.8.4.4"]
DnsServers: ["8.8.8.8", "8.8.4.4"]

View File

@ -25,7 +25,7 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
@ -62,18 +62,18 @@ parameters:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string

View File

@ -1,3 +1,4 @@
---
parameter_defaults:
ControllerSchedulerHints:
'capabilities:node': 'controller-%index%'

View File

@ -1,8 +1,8 @@
flake8
pytest
pytest-html
pytest-cov
mock
requests
pprint
PyYAML
pre-commit>=1.10 # MIT License
PyYAML

View File

@ -21,9 +21,9 @@
import argparse
import json
import logging
import sys
import subprocess
import os
import subprocess
import sys
import tempfile
import textwrap
import threading
@ -65,7 +65,8 @@ class TestCallback(object):
if time_waiting > 90:
logger.warn('%.1f seconds waiting for a worker.' % (time_waiting))
if "Couldn't retrieve env" in job.arguments or "Failed creating OVB stack" in job.arguments:
if "Couldn't retrieve env" in job.arguments or \
"Failed creating OVB stack" in job.arguments:
logger.error(job.arguments)
self.rv = 2
job.sendWorkComplete("")
@ -80,7 +81,7 @@ class TestCallback(object):
try:
self.rv = subprocess.call(self.command)
except:
except Exception:
logger.exception("Error calling command")
self.rv = 2
@ -195,14 +196,14 @@ def main(args=sys.argv[1:]):
job_params = {
"callback_name": callback_name,
"timeout": opts.timeout,
"envsize":opts.envsize,
"compute_envsize":opts.compute_envsize,
"ucinstance":opts.ucinstance,
"envsize": opts.envsize,
"compute_envsize": opts.compute_envsize,
"ucinstance": opts.ucinstance,
"create_undercloud": "true" if opts.create_undercloud else "",
"ssh_key":opts.ssh_key,
"net_iso":opts.net_iso,
"extra_nodes":opts.extra_nodes,
"job_identifier":job_identifier,
"ssh_key": opts.ssh_key,
"net_iso": opts.net_iso,
"extra_nodes": opts.extra_nodes,
"job_identifier": job_identifier,
}
job = gear.Job('lockenv', json.dumps(job_params))
client.submitJob(job)
@ -227,5 +228,6 @@ def main(args=sys.argv[1:]):
logger.debug("Exiting with status : %d", cb.rv)
return cb.rv
if __name__ == '__main__':
exit(main())

View File

@ -1,3 +1,4 @@
---
# Collect logs settings
# artcl_tar_gz: true

View File

@ -1,3 +1,4 @@
---
# TRIPLEO-CI environment settings
undercloud_user: "{{ lookup('env','USER') }}"
non_root_user: "{{ undercloud_user }}"

View File

@ -1,3 +1,4 @@
---
undercloud_type: ovb
use_testenv_broker: true
build_test_packages: true

View File

@ -151,16 +151,16 @@ else
for playbook in $PLAYBOOKS; do
echo "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}"
run_with_timeout $START_JOB_TIME $QUICKSTART_INSTALL_CMD \
"${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
$NODES_ARGS \
$FEATURESET_CONF \
$ENV_VARS \
$EXTRA_VARS \
$VXLAN_VARS \
$DEFAULT_ARGS \
--extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
$LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
"${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
$NODES_ARGS \
$FEATURESET_CONF \
$ENV_VARS \
$EXTRA_VARS \
$VXLAN_VARS \
$DEFAULT_ARGS \
--extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
$LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
# Print status of playbook run
[[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"

View File

@ -18,7 +18,8 @@ commands = pyflakes setup.py scripts
[testenv:linters]
basepython = python3
whitelist_externals = bash
commands = flake8 --max-line-length 80 {toxinidir} {posargs}
commands = python -m pre_commit run --source HEAD^ --origin HEAD
# deprecated: use linters instead. kept only as a convenience alias
[testenv:pep8]

View File

@ -55,8 +55,8 @@
gate:
queue: tripleo
jobs:
# Don't put a files section on the linters job, otherwise no
# jobs might be defined and nothing can merge in this repo.
# Don't put a files section on the linters job, otherwise no
# jobs might be defined and nothing can merge in this repo.
- openstack-tox-linters
- openstack-tox-py27:
files:

View File

@ -1,3 +1,4 @@
---
- nodeset:
name: two-centos-7-nodes
nodes: