Remove devstack upgrade and gate (for now).

This commit is contained in:
Pino de Candia 2018-01-26 17:32:47 -06:00
parent 7f55b15f63
commit 031f13edbd
12 changed files with 0 additions and 1520 deletions

View File

@ -1,318 +0,0 @@
#!/usr/bin/env bash
# **designate.sh**
# Simple Tests to verify designate is running
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
echo "*********************************************************************"
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following allowing as the install occurs.
set -o xtrace
# Keep track of the current directory
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)
# Default to the devstack checkout two directories above this script...
DEVSTACK_DIR=$(cd $SCRIPT_DIR/../..; pwd)/devstack
# ...but prefer $HOME/devstack when a runnable stack.sh exists there.
if [ -x "$HOME/devstack/stack.sh" ]; then
DEVSTACK_DIR=$HOME/devstack/
fi
# Import common functions (is_service_enabled, die, die_if_not_set, ...)
source $DEVSTACK_DIR/functions
# Import configuration (admin credentials into OS_* variables)
source $DEVSTACK_DIR/openrc admin admin
# Import exercise configuration
source $DEVSTACK_DIR/exerciserc
# Skip if designate is not enabled
# NOTE(review): exit code 55 appears to be the devstack convention for a
# skipped exercise -- confirm against devstack's exercise runner.
is_service_enabled designate || exit 55
# Import settings + designate library
source $SCRIPT_DIR/plugin.sh
# Settings
# ========
source $SCRIPT_DIR/settings
# Used with dig to look up in DNS
DIG_TIMEOUT=30
if [ "$DESIGNATE_BACKEND_DRIVER" == "akamai" ]; then
# Akamai can be slow to propagate changes out
DIG_TIMEOUT=300
fi
# used with dig to look up in DNS
DIG_FLAGS="-p $DESIGNATE_SERVICE_PORT_DNS @$DESIGNATE_SERVICE_HOST"
# used with dig to do an AXFR against MDNS
DIG_AXFR_FLAGS="-p $DESIGNATE_SERVICE_PORT_MDNS @$DESIGNATE_SERVICE_HOST AXFR +tcp +nocmd"
# Functions
# =========
# Delete every domain across all tenants. Registered as an EXIT trap so it
# runs on success and failure alike; this matters for backends like
# Akamai/Dyn, where state is not fully reset between test runs.
# Globals: DEVSTACK_DIR (read)
function cleanup {
    # Re-source admin credentials: the exercise may have changed OS_* vars.
    # (quoted path: unquoted $DEVSTACK_DIR broke on paths with spaces)
    source "$DEVSTACK_DIR/openrc" admin admin
    # Column 1 of the CSV listing is the domain id; tail skips the header.
    designate --all-tenants domain-list -f csv | awk -F, '{print $1}' | \
        tail -n+2 | xargs --no-run-if-empty -n1 designate --all-tenants domain-delete
}
trap cleanup EXIT
# Poll DNS until the record resolves to the expected value or time out.
# Globals:   DESIGNATE_BACKEND_DRIVER, DIG_TIMEOUT, DIG_FLAGS (read)
# Arguments: $1 record name, $2 record type, $3 expected value (grep pattern)
# Returns:   0 on success; dies on timeout
function ensure_record_present {
    # Quoted assignments: the unquoted originals word-split on spaces.
    local record_name="$1"
    local record_type="$2"
    local record_value="$3"
    if [ "$DESIGNATE_BACKEND_DRIVER" = "fake" ] ; then
        # if the backend is fake, there will be no actual DNS records
        return 0
    fi
    # DIG_FLAGS stays unquoted on purpose: it carries multiple dig args.
    if ! timeout "$DIG_TIMEOUT" sh -c "while ! dig +short $DIG_FLAGS $record_name $record_type | grep \"$record_value\"; do sleep 1; done"; then
        die $LINENO "Error: record $record_name ($record_type) not found in DNS"
    fi
    # Display for debugging
    dig $DIG_FLAGS "$record_name" "$record_type"
    return 0
}
# Poll DNS until the record no longer resolves to the value, or time out.
# Globals:   DESIGNATE_BACKEND_DRIVER, DIG_TIMEOUT, DIG_FLAGS (read)
# Arguments: $1 record name, $2 record type, $3 value that must disappear
# Returns:   0 on success; dies if the record is still present
function ensure_record_absent {
    # Quoted assignments: the unquoted originals word-split on spaces.
    local record_name="$1"
    local record_type="$2"
    local record_value="$3"
    if [ "$DESIGNATE_BACKEND_DRIVER" = "fake" ] ; then
        # if the backend is fake, there will be no actual DNS records
        return 0
    fi
    # DIG_FLAGS stays unquoted on purpose: it carries multiple dig args.
    if ! timeout "$DIG_TIMEOUT" sh -c "while dig +short $DIG_FLAGS $record_name $record_type | grep \"$record_value\"; do sleep 1; done"; then
        # Display for debugging
        dig $DIG_FLAGS "$record_name" "$record_type"
        die $LINENO "Error: record $record_name ($record_type) found in DNS, should be absent"
    fi
    return 0
}
# do an AXFR request to MDNS
# if it does not match the expected value, give an error
function verify_axfr_in_mdns {
# Display for debugging
dig $DIG_AXFR_FLAGS "$1"
if dig $DIG_AXFR_FLAGS "$1"; then
if [ -n "$2" ] ; then
local axfr_records=$(dig $DIG_AXFR_FLAGS "$1" | grep "$1" | wc -l)
if [ "$axfr_records" = "$2" ] ; then
return 0
else
die $LINENO "Error: AXFR to MDNS did not return the expected number of records"
fi
fi
return 0
else
die $LINENO "Error: AXFR to MDNS did not return a correct response"
fi
}
# get the domain id (uuid) given the domain name
# if REQUIRED is set, die with an error if name not found
function get_domain_id {
local domain_name=$1
local required=$2
local domain_id=$(designate domain-list | egrep " $domain_name " | get_field 1)
if [ "$required" = "1" ] ; then
die_if_not_set $LINENO domain_id "Failure retrieving DOMAIN_ID"
fi
echo "$domain_id"
}
# get the domain_name given the id
function get_domain_name {
designate domain-list | grep "$1" | get_field 2
}
# if the given domain does not exist, it will be created
# the domain_id of the domain will be returned
function get_or_create_domain_id {
local domainid=$(get_domain_id "$1")
if [[ -z "$domainid" ]]; then
designate domain-create --name $1 --email admin@devstack.org --ttl 86400 --description "domain $1" 1>&2
domainid=$(designate domain-list | grep "$1" | get_field 1)
fi
echo $domainid
}
# get the record id (uuid) given the record name and domain id
# if REQUIRED is set, die with an error if name not found
function get_record_id {
local domain_id=$1
local record_name=$2
local record_type=$3
local required=$4
local record_id=$(designate record-list $domain_id | egrep " $record_name " | egrep " $record_type " | get_field 1)
if [ "$required" = "1" ] ; then
die_if_not_set $LINENO record_id "Failure retrieving RECORD_ID"
fi
echo "$record_id"
}
# Testing Servers
# ===============
designate server-list
# NUMBER_OF_RECORDS keeps track of the records we need to get for AXFR
# We start with the number of NS lines returned from server list
# (Header line makes up for SOA + Number of NS record lines)
NUMBER_OF_RECORDS=$(designate server-list -f csv | wc -l)
# Add 1 extra to account for the additional SOA at the end of the AXFR
((NUMBER_OF_RECORDS+=1))
# Testing Domains
# ===============
# List domains
designate domain-list
# Create random domain name (random hex avoids collisions across runs)
DOMAIN_NAME="exercise-$(openssl rand -hex 4).com."
# Create the domain
designate domain-create --name $DOMAIN_NAME --email devstack@example.org
# "1" makes the lookup fatal if the domain was not created
DOMAIN_ID=$(get_domain_id $DOMAIN_NAME 1)
# Fetch the domain
designate domain-get $DOMAIN_ID
# List the nameservers hosting the domain
designate domain-servers-list $DOMAIN_ID
# Testing Records
# ===============
# Each record type below follows the same pattern: create, bump the AXFR
# record counter, fetch by id, then verify via live DNS lookup.
# Create random record name
A_RECORD_NAME="$(openssl rand -hex 4).${DOMAIN_NAME}"
# Create an A record
designate record-create $DOMAIN_ID --name $A_RECORD_NAME --type A --data 127.0.0.1
((NUMBER_OF_RECORDS++))
A_RECORD_ID=$(get_record_id $DOMAIN_ID $A_RECORD_NAME A)
# Fetch the record
designate record-get $DOMAIN_ID $A_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $A_RECORD_NAME A 127.0.0.1
# -----
# Create random record name
AAAA_RECORD_NAME="$(openssl rand -hex 4).${DOMAIN_NAME}"
# Create an AAAA record
designate record-create $DOMAIN_ID --name $AAAA_RECORD_NAME --type AAAA --data "2607:f0d0:1002:51::4"
((NUMBER_OF_RECORDS++))
AAAA_RECORD_ID=$(get_record_id $DOMAIN_ID $AAAA_RECORD_NAME AAAA)
# Fetch the record
designate record-get $DOMAIN_ID $AAAA_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $AAAA_RECORD_NAME AAAA 2607:f0d0:1002:51::4
# -----
# Create a MX record (MX records live at the domain apex, hence no
# random sub-name)
designate record-create $DOMAIN_ID --name $DOMAIN_NAME --type MX --priority 5 --data "mail.example.com."
((NUMBER_OF_RECORDS++))
MX_RECORD_ID=$(get_record_id $DOMAIN_ID $DOMAIN_NAME MX)
# Fetch the record
designate record-get $DOMAIN_ID $MX_RECORD_ID
# Verify the record is published in DNS (dig shows "<priority> <exchange>")
ensure_record_present $DOMAIN_NAME MX "5 mail.example.com."
# -----
# Create a SRV record
designate record-create $DOMAIN_ID --name _sip._tcp.$DOMAIN_NAME --type SRV --priority 10 --data "5 5060 sip.example.com."
((NUMBER_OF_RECORDS++))
SRV_RECORD_ID=$(get_record_id $DOMAIN_ID _sip._tcp.$DOMAIN_NAME SRV)
# Fetch the record
designate record-get $DOMAIN_ID $SRV_RECORD_ID
# Verify the record is published in DNS
ensure_record_present _sip._tcp.$DOMAIN_NAME SRV "10 5 5060 sip.example.com."
# -----
# Create random record name
CNAME_RECORD_NAME="$(openssl rand -hex 4).${DOMAIN_NAME}"
# Create a CNAME record
designate record-create $DOMAIN_ID --name $CNAME_RECORD_NAME --type CNAME --data $DOMAIN_NAME
((NUMBER_OF_RECORDS++))
CNAME_RECORD_ID=$(get_record_id $DOMAIN_ID $CNAME_RECORD_NAME CNAME)
# Fetch the record
designate record-get $DOMAIN_ID $CNAME_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $CNAME_RECORD_NAME CNAME $DOMAIN_NAME
# -----
# List Records
designate record-list $DOMAIN_ID
# Send an AXFR to MDNS and check for the records returned
verify_axfr_in_mdns $DOMAIN_NAME $NUMBER_OF_RECORDS
# -----
# Delete a Record
designate record-delete $DOMAIN_ID $CNAME_RECORD_ID
# List Records
designate record-list $DOMAIN_ID
# Fetch the record - should be gone ("|| echo" keeps errexit from aborting
# on the expected failure)
designate record-get $DOMAIN_ID $CNAME_RECORD_ID || echo "good - record was removed"
# verify not in DNS anymore
ensure_record_absent $CNAME_RECORD_NAME CNAME $DOMAIN_NAME
# Testing Domains Delete
# ======================
# Delete the domain
designate domain-delete $DOMAIN_ID
# Fetch the domain - should be gone
designate domain-get $DOMAIN_ID || echo "good - domain was removed"
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End DevStack Exercise: $0"
echo "*********************************************************************"

View File

@ -1,24 +0,0 @@
#!/bin/bash
# Gate hook: write the Designate settings into the DevStack localrc, then
# start the standard devstack-gate run.
# Globals: BASE (read) - devstack-gate workspace root
set -ex
LOCALRC="$BASE/new/devstack/localrc"
# Quoted paths: the unquoted $BASE originals broke on paths with spaces.
pushd "$BASE/new/devstack"
# Backend driver under test; powerdns unless the job overrides it.
DEVSTACK_GATE_DESIGNATE_DRIVER=${DEVSTACK_GATE_DESIGNATE_DRIVER:-powerdns}
# Keep our localrc additions when devstack-gate regenerates configuration.
export KEEP_LOCALRC=1
export ENABLED_SERVICES=designate,designate-api,designate-central,designate-sink,designate-mdns,designate-pool-manager,designate-zone-manager
# Append all settings with a single open of the localrc file instead of
# five separate '>>' redirections.
{
    echo "DESIGNATE_SERVICE_PORT_DNS=5322"
    echo "DESIGNATE_BACKEND_DRIVER=$DEVSTACK_GATE_DESIGNATE_DRIVER"
    echo "DESIGNATE_PERIODIC_RECOVERY_INTERVAL=20"
    echo "DESIGNATE_PERIODIC_SYNC_INTERVAL=20"
    # Pass through any DESIGNATE_ env vars to the localrc file
    # ('|| :' keeps set -e happy when none are set)
    env | grep -E "^DESIGNATE_" || :
} >> "$LOCALRC"
popd
# Run DevStack Gate
"$BASE/new/devstack-gate/devstack-vm-gate.sh"

View File

@ -1,50 +0,0 @@
#!/bin/bash
set -ex
# Run the Designate DevStack exercises
$BASE/new/designate/devstack/exercise.sh
# Import functions needed for the below workaround (iniset)
source $BASE/new/devstack/functions
# Workaround for Tempest architectural changes
# See bugs:
# 1) https://bugs.launchpad.net/manila/+bug/1531049
# 2) https://bugs.launchpad.net/tempest/+bug/1524717
TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf
ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secretadmin"}
# Make tempest.conf writable by the current (non-root) user.
sudo chown -R $USER:stack $BASE/new/tempest
sudo chown -R $USER:stack $BASE/data/tempest
# Populate the [auth] admin credentials expected by modern tempest.
iniset $TEMPEST_CONFIG auth admin_username ${ADMIN_USERNAME:-"admin"}
iniset $TEMPEST_CONFIG auth admin_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG auth admin_tenant_name $ADMIN_TENANT_NAME
iniset $TEMPEST_CONFIG auth admin_domain_name ${ADMIN_DOMAIN_NAME:-"Default"}
iniset $TEMPEST_CONFIG identity username ${TEMPEST_USERNAME:-"demo"}
iniset $TEMPEST_CONFIG identity password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity tenant_name ${TEMPEST_TENANT_NAME:-"demo"}
iniset $TEMPEST_CONFIG identity alt_username ${ALT_USERNAME:-"alt_demo"}
iniset $TEMPEST_CONFIG identity alt_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity alt_tenant_name ${ALT_TENANT_NAME:-"alt_demo"}
iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
iniset $TEMPEST_CONFIG validation network_for_ssh ${PRIVATE_NETWORK_NAME:-"private"}
# Run the Designate Tempest tests
# NOTE(review): relies on the current working directory containing
# run_tempest_tests.sh -- confirm what cwd the gate invokes this hook from.
sudo BASE=$BASE ./run_tempest_tests.sh
# TODO(pglass) - update cli tests to look in the [auth] section for admin creds
iniset $TEMPEST_CONFIG identity admin_username ${ADMIN_USERNAME:-"admin"}
iniset $TEMPEST_CONFIG identity admin_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
iniset $TEMPEST_CONFIG identity admin_domain_name ${ADMIN_DOMAIN_NAME:-"Default"}
# must match the dir where `openstack` is installed
DESIGNATE_CLI_DIR=${DESIGNATE_CLI_DIR:-"$BASE/new/python-designateclient"}
iniset $TEMPEST_CONFIG designateclient directory "$DESIGNATE_CLI_DIR/.venv/bin"
# Run the python-designateclient functional tests
sudo BASE=$BASE ./run_cli_tests.sh

View File

@ -1,28 +0,0 @@
#!/bin/bash -e
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Run the python-designateclient functional tests against the tempest
# configuration. Requires BASE to point at the devstack-gate workspace.
DESIGNATE_CLI_DIR=${DESIGNATE_CLI_DIR:-"$BASE/new/python-designateclient"}
TEMPEST_DIR=${TEMPEST_DIR:-"$BASE/new/tempest"}
export TEMPEST_CONFIG=$TEMPEST_DIR/etc/tempest.conf
pushd $DESIGNATE_CLI_DIR
# we need the actual openstack executable which is not installed by tox
virtualenv "$DESIGNATE_CLI_DIR/.venv"
source "$DESIGNATE_CLI_DIR/.venv/bin/activate"
pip install python-openstackclient
pip install .
# Run the functional suite with 4 parallel workers.
tox -e functional -- --concurrency 4
popd

View File

@ -1,32 +0,0 @@
#!/bin/bash -e
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Run the Designate functional (tempest) tests. Waits for the API to come
# up first so a slow service start does not register as test failures.
# How many seconds to wait for the API to be responding before giving up
API_RESPONDING_TIMEOUT=20
# The v2 API root lists available versions; grepping for 'v2' is a cheap
# liveness check.
if ! timeout ${API_RESPONDING_TIMEOUT} sh -c "while ! curl -s http://127.0.0.1:9001/ 2>/dev/null | grep -q 'v2' ; do sleep 1; done"; then
echo "The Designate API failed to respond within ${API_RESPONDING_TIMEOUT} seconds"
exit 1
fi
echo "Successfully contacted the Designate API"
# Where Designate and Tempest code lives
DESIGNATE_DIR=${DESIGNATE_DIR:-"$BASE/new/designate"}
TEMPEST_DIR=${TEMPEST_DIR:-"$BASE/new/tempest"}
pushd $DESIGNATE_DIR
export TEMPEST_CONFIG=$TEMPEST_DIR/etc/tempest.conf
# Run the functional suite with 4 parallel workers.
tox -e functional -- --concurrency 4
popd

View File

@ -1,664 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network simulator
~~~~~~~~~~~~~~~~~
Perform end-to-end stress tests on Designate on a simulated network
that displays high latency and packet loss (almost like real ones)
WARNING: this script is to be run on a disposable devstack VM
It requires sudo and it will configure /sbin/tc
Usage:
cd <designate_repo>/contrib/vagrant
./setup_ubuntu_devstack
vagrant ssh ubuntu
source ~/devstack/openrc
/opt/stack/designate/devstack/networking_test.py
Monitor the logfiles
"""
from argparse import ArgumentParser
from collections import OrderedDict
from itertools import product
from subprocess import check_output
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from threading import Thread
import json
import logging
import os
import random
import string
import time
import sys
import dns
import dns.resolver
# Root logger; configured in main() via logging.basicConfig.
log = logging.getLogger()
# Absolute paths to the privileged network tools invoked through sudo.
tc_path = '/sbin/tc'
sudo_path = '/usr/bin/sudo'
iptables_restore_path = '/sbin/iptables-restore'
# Legacy designate client is called by absolute path; the unified
# openstack client is resolved through $PATH.
designate_cli_path = '/usr/local/bin/designate'
openstack_cli = 'openstack'
def gen_random_name(l):
    """Return a random string of length *l* drawn from [a-z0-9]."""
    alphabet = string.ascii_lowercase + string.digits
    chars = [random.choice(alphabet) for _ in range(l)]
    return "".join(chars)
def parse_args():
    """Parse command-line options: -d/--debug enables verbose logging."""
    parser = ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true')
    return parser.parse_args()
def run_shell(cmd, env=None):
    """Run *cmd* through bash and return stdout as rstripped lines.

    :param cmd: shell command string
    :param env: optional environment mapping for the child process
    :returns: list of output lines with trailing whitespace removed
    :raises CalledProcessError: when the command exits non-zero
    """
    log.debug(" running %s" % cmd)
    raw_lines = check_output(
        cmd, env=env, shell=True, executable='/bin/bash').splitlines()
    return [raw.rstrip() for raw in raw_lines]
class DesignateCLI(object):
    """Designate CLI runner

    Wraps the ``openstack`` and legacy ``designate`` command-line clients,
    forwarding only the OS_* credential variables from the environment.
    """

    def __init__(self):
        """Setup CLI handler"""
        # Forward only OS_* variables (auth credentials/endpoints).
        self._cli_env = {}
        for k, v in sorted(os.environ.items()):
            if k.startswith('OS_'):
                log.debug("%s: %s", k, v)
                self._cli_env[k] = v

    def setup_quota(self, quota):
        """Set every Designate quota for the current user to *quota*."""
        user_id = self.run_json("token issue")["user_id"]
        cmd = """quota-update
            --domains %(quota)d
            --domain-recordsets %(quota)d
            --recordset-records %(quota)d
            --domain-records %(quota)d
            %(user_id)s """
        # Collapse the template onto a single line for the shell.
        cmd = ' '.join(cmd.split())
        quotas = self.run_designate_cli_table(cmd % dict(quota=quota,
                                                         user_id=user_id))
        assert quotas['domain_records'] == str(quota)

    def run(self, cmd):
        """Run a openstack client command, returning output lines."""
        return run_shell("%s %s" % (openstack_cli, cmd),
                         env=self._cli_env)

    def run_json(self, cmd):
        """Run a openstack client command using JSON output

        :returns: dict
        :raises CalledProcessError:
        """
        cmd = "%s %s -f json" % (openstack_cli, cmd)
        log.debug(" running %s" % cmd)
        out = check_output(cmd, env=self._cli_env, shell=True,
                           executable='/bin/bash')
        return json.loads(out)

    def runcsv(self, cmd):
        """Run a command using the -f csv flag, parse the output
        and return a list of dicts (one per row, keyed by header).
        """
        cmdout = self.run(cmd + " -f csv")
        header = [item.strip('"') for item in cmdout[0].split(',')]
        output_rows = []
        for line in cmdout[1:]:
            rawvalues = line.split(',')
            d = OrderedDict()
            for k, v in zip(header, rawvalues):
                if v.startswith('"') or v.endswith('"'):
                    # Quoted field: treat as string.
                    v = v.strip('"')
                else:
                    # Unquoted field: coerce to int, then float.
                    try:
                        v = int(v)
                    except ValueError:
                        v = float(v)
                d[k] = v
            output_rows.append(d)
        return output_rows

    def run_designate_cli_table(self, cmd):
        """Run a command in the designate cli expecting a table to be
        returned and parse it into a dict of first column -> second column.
        """
        cmdout = run_shell("%s %s" % (designate_cli_path, cmd),
                           env=self._cli_env)
        out = {}
        try:
            for line in cmdout:
                if not line.startswith('| '):
                    continue
                if not line.endswith(' |'):
                    continue
                k = line.split('|')[1].strip()
                v = line.split('|')[2].strip()
                out[k] = v
        except Exception:
            # Bug fix: previously iterated the partially-built dict
            # ("for line in out") instead of the raw CLI output.
            log.error("Unable to parse output into a dict:")
            for line in cmdout:
                log.error(line)
            log.error("-----------------------------------")
            raise
        return out
class TrafficControl(object):
    """Configure Linux Traffic Control to simulate a real network
    """
    # fwmark values for classifying traffic with iptables (currently
    # unused; see the *_iptables_marking methods).
    protocol_marks = dict(
        mysql=1,
        dns_udp=2,
        dns_tcp=3,
    )

    def run_tc(self, cmd):
        # tc requires root privileges: run through sudo.
        return run_shell("%s %s %s" % (sudo_path, tc_path, cmd))

    def _apply_iptables_conf(self, ipt_conf):
        # Write the ruleset to a temp file and load it with
        # iptables-restore.
        # NOTE(review): NamedTemporaryFile.write() requires bytes on
        # Python 3; this str write assumes Python 2 -- confirm.
        tf = NamedTemporaryFile()
        tf.file.write(ipt_conf)
        tf.file.flush()
        run_shell("%s %s %s" % (sudo_path, iptables_restore_path, tf.name))
        tf.file.close()

    def cleanup_iptables_marking(self):
        # Currently unneeded
        ipt_conf = """
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
COMMIT
"""
        self._apply_iptables_conf(ipt_conf)

    def setup_iptables_marking(self):
        # Currently unneeded
        ipt_conf = """
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
-A PREROUTING -i lo -p tcp -m tcp --dport 3306 -j MARK --set-xmark %(mysql)s
-A PREROUTING -i lo -p tcp -m tcp --sport 3306 -j MARK --set-xmark %(mysql)s
-A PREROUTING -i lo -p tcp -m tcp --dport 53 -j MARK --set-xmark %(dns_tcp)s
-A PREROUTING -i lo -p tcp -m tcp --sport 53 -j MARK --set-xmark %(dns_tcp)s
-A PREROUTING -i lo -p udp -m udp --dport 53 -j MARK --set-xmark %(dns_udp)s
-A PREROUTING -i lo -p udp -m udp --sport 53 -j MARK --set-xmark %(dns_udp)s
COMMIT
"""
        # Render protocol_marks as "0xN/0xffffffff" strings for --set-xmark.
        marks = dict((k, "0x%d/0xffffffff" % v)
                     for k, v in self.protocol_marks.items())
        ipt_conf = ipt_conf % marks
        self._apply_iptables_conf(ipt_conf)

    def cleanup_tc(self):
        """Clean up tc conf
        """
        # Only delete the root qdisc when one exists, so tc does not error.
        out = self.run_tc('qdisc show dev lo')
        if out:
            log.debug("Cleaning up tc conf")
            self.run_tc('qdisc del dev lo root')
        else:
            log.debug("No tc conf to be cleaned up")

    def setup_tc(self, dns_latency_ms=0, dns_packet_loss_perc=0,
                 db_latency_ms=1, db_packet_loss_perc=1):
        """Setup traffic control
        """
        # NOTE(review): db_latency_ms/db_packet_loss_perc are accepted but
        # never used; the MySQL/RabbitMQ/memcached blocks below hard-code
        # 1ms / 1% -- confirm whether this is intentional.
        self.cleanup_tc()
        # Create HTB at the root
        self.run_tc("qdisc add dev lo handle 1: root htb")
        self.run_tc("class add dev lo parent 1: classid 1:5 htb rate 1000Mbps")
        self.run_tc("class add dev lo parent 1: classid 1:7 htb rate 1000Mbps")
        # TCP DNS
        self._setup_tc_block('1:8', 'tcp', 53, dns_latency_ms,
                             dns_packet_loss_perc)
        # UDP DNS
        self._setup_tc_block('1:9', 'udp', 53, dns_latency_ms,
                             dns_packet_loss_perc)
        # TCP mDNS
        self._setup_tc_block('1:10', 'tcp', 5354, dns_latency_ms,
                             dns_packet_loss_perc)
        # UDP mDNS
        self._setup_tc_block('1:11', 'udp', 5354, dns_latency_ms,
                             dns_packet_loss_perc)
        # MySQL
        self._setup_tc_block('1:12', 'tcp', 3306, 1, 1)
        # RabbitMQ port: 5672
        self._setup_tc_block('1:13', 'tcp', 5672, 1, 1)
        # MemcacheD
        self._setup_tc_block('1:14', 'tcp', 11211, 1, 1)

    def _setup_tc_block(self, class_id, proto, port, latency_ms,
                        packet_loss_perc):
        """Setup tc htb entry, netem and filter"""
        assert proto in ('tcp', 'udp')
        cmd = "class add dev lo parent 1: classid %s htb rate 1000Mbps" % \
            class_id
        self.run_tc(cmd)
        self._setup_netem(class_id, latency_ms, latency_ms, packet_loss_perc)
        # Shape traffic in both directions (source and destination port).
        self._setup_filter(proto, 'sport %d' % port, class_id)
        self._setup_filter(proto, 'dport %d' % port, class_id)

    def _setup_netem(self, classid, latency1, latency2, loss_perc):
        """Setup tc netem
        """
        # This could be done with the FireQOS tool instead:
        # https://firehol.org/tutorial/fireqos-new-user/
        # Fixed 0.1% corruption/duplication and 25/50% reordering are
        # layered on top of the configurable latency and loss.
        cmd = ("qdisc add dev lo parent {cid} netem"
               " corrupt 0.1%"
               " delay {lat1}ms {lat2}ms distribution normal"
               " duplicate 0.1%"
               " loss {packet_loss_perc}%"
               " reorder 25% 50%")
        cmd = cmd.format(cid=classid, lat1=latency1, lat2=latency2,
                         packet_loss_perc=loss_perc)
        self.run_tc(cmd)

    def _setup_filter(self, protocol, filter, flowid):
        """Setup tc filter
        """
        # IP protocol numbers: TCP=6, UDP=17.
        protocol_nums = dict(tcp=6, udp=17)
        pnum = protocol_nums[protocol]
        cmd = "filter add dev lo protocol ip prio 1 u32 match ip protocol " \
            "%(pnum)d 0xff match ip %(filter)s 0xffff flowid %(flowid)s"
        self.run_tc(cmd % dict(pnum=pnum, filter=filter, flowid=flowid))
class Digger(object):
    """Poll the local resolver from a background thread until a set of
    DNS "goals" (expected records / SOA serials) has been observed.
    """

    def __init__(self):
        self.ns_ipaddr = self.get_nameserver_ipaddr()
        self._setup_resolver()
        # Upper bound on polling rate, shared by all goals.
        self.max_probes_per_second = 30
        self.reset_goals()

    @property
    def prober_is_running(self):
        # False when the thread has not been created yet or has finished.
        try:
            return self._prober_thread.is_alive()
        except AttributeError:
            return False

    def _setup_resolver(self, timeout=1):
        # configure=False: do not read /etc/resolv.conf; query only the
        # nameserver discovered in get_nameserver_ipaddr().
        resolver = dns.resolver.Resolver(configure=False)
        resolver.timeout = timeout
        resolver.lifetime = timeout
        resolver.nameservers = [self.ns_ipaddr]
        self.resolver = resolver

    def get_nameserver_ipaddr(self):
        # FIXME: find a better way to do this
        # Scrapes netstat for the listening PowerDNS socket and takes the
        # address portion of "addr:port".
        out = run_shell('sudo netstat -nlpt | grep pdns_server')
        ipaddr = out[0].split()[3]
        ipaddr = ipaddr.split(':', 1)[0]
        log.debug("Resolver ipaddr: %s" % ipaddr)
        return ipaddr

    def query_a_record(self, record_name, timeout=3):
        # Return the first A record address, or None on any failure.
        try:
            answer = self.resolver.query(record_name, 'A')
            if answer.rrset:
                return answer.rrset[0].address
        except Exception:
            return None

    def query_soa(self, zone_name, timeout=3):
        # Return the zone's SOA serial, or None on any failure.
        try:
            soa_answer = self.resolver.query(zone_name, 'SOA')
            soa_serial = soa_answer[0].serial
            return soa_serial
        except Exception:
            return None

    def reset_goals(self):
        # Only safe while the prober thread is stopped.
        assert not self.prober_is_running
        self.goals = set()
        self.summary = dict(
            success_cnt=0,
            total_time_to_success=0,
        )

    def add_goal(self, goal):
        # Timestamp the goal so time-to-success can be measured.
        self.goals.add(goal + (time.time(), ))

    def _print_summary(self, final=True):
        """Log out a summary of the current run
        """
        remaining = len(self.goals)
        success_cnt = self.summary['success_cnt']
        try:
            avg_t = (self.summary['total_time_to_success'] / success_cnt)
            avg_t = ", avg time to success: %2.3fs" % avg_t
        except ZeroDivisionError:
            # No successes yet: omit the average.
            avg_t = ''
        logf = log.info if final else log.debug
        logf(" test summary: success %3d, remaining %3d %s" % (
            success_cnt, remaining, avg_t))

    def _probe_resolver(self):
        """Probe the local resolver, report achieved goals

        Runs in the prober thread. Loops until all goals are reached and
        stopping is allowed, or the timeout expires.
        """
        log.debug("Starting prober")
        assert self.prober_is_running is True
        self._progress_report_time = 0
        now = time.time()
        while (self.goals or not self.prober_can_stop) and \
                now < self.prober_timeout_time:
            # Iterate over a snapshot: goals are discarded while looping.
            for goal in tuple(self.goals):
                goal_type = goal[0]
                if goal_type == 'zone_serial_ge':
                    # Reached when the zone's SOA serial catches up.
                    goal_type, zone_name, serial, t0 = goal
                    actual_serial = self.query_soa(zone_name)
                    if actual_serial and actual_serial >= serial:
                        deltat = time.time() - t0
                        log.debug(" reached %s in %.3fs" % (repr(goal),
                                                            deltat))
                        self.goals.discard(goal)
                        self.summary['success_cnt'] += 1
                        self.summary['total_time_to_success'] += deltat
                elif goal_type == 'record_a':
                    # Reached when the A record resolves to the address.
                    goal_type, record_name, ipaddr, t0 = goal
                    actual_ipaddr = self.query_a_record(record_name)
                    if actual_ipaddr == ipaddr:
                        deltat = time.time() - t0
                        log.debug(" reached %s in %.3fs" % (repr(goal),
                                                            deltat))
                        self.goals.discard(goal)
                        self.summary['success_cnt'] += 1
                        self.summary['total_time_to_success'] += deltat
                else:
                    log.error("Unknown goal %r" % goal)
                # Rate-limit between individual probes; bail out of the
                # inner loop once the deadline has passed.
                if time.time() < self.prober_timeout_time:
                    time.sleep(1.0 / self.max_probes_per_second)
                else:
                    break
            # Emit a progress summary at most every 10 seconds.
            if time.time() > self._progress_report_time:
                self._print_summary(final=False)
                self._progress_report_time = time.time() + 10
            time.sleep(1.0 / self.max_probes_per_second)
            now = time.time()
        if now > self.prober_timeout_time:
            log.info("prober timed out after %d s" % (
                now - self.prober_start_time))
        self._print_summary()

    def probe_resolver(self, timeout=600):
        """Probe the local resolver in a dedicated thread until all
        goals have been achieved or timeout occours
        """
        assert not self.prober_is_running
        self.prober_can_stop = False
        self.prober_start_time = time.time()
        self.prober_timeout_time = self.prober_start_time + timeout
        # Daemon thread: does not block interpreter exit.
        self._prober_thread = Thread(target=self._probe_resolver)
        self._prober_thread.daemon = True
        self._prober_thread.start()

    def stop_prober(self):
        # Force the prober loop to end on its next deadline check.
        self.prober_can_stop = True
        self.prober_timeout_time = 0

    def wait_on_prober(self):
        # Allow the thread to stop once goals are exhausted, then join it.
        self.prober_can_stop = True
        self._prober_thread.join()
        assert self.prober_is_running is False
def list_zones(cli):
    """Return the names of all zones visible to *cli*."""
    zone_names = [zone["name"] for zone in cli.run_json('zone list')]
    log.debug("Found zones: %r", zone_names)
    return zone_names
def delete_zone_by_name(cli, zn, ignore_missing=False):
    """Delete zone *zn*; with ignore_missing, silently skip absent zones."""
    if ignore_missing and zn not in list_zones(cli):
        # Nothing to delete.
        return
    cli.run('zone delete %s' % zn)
def create_and_probe_a_record(cli, digger, zone_id, record_name, ipaddr):
    """Create an A record and register a propagation goal with *digger*."""
    create_cmd = 'recordset create %s %s --type A --records %s' % (
        zone_id, record_name, ipaddr)
    cli.run_json(create_cmd)
    digger.add_goal(('record_a', record_name, ipaddr))
def delete_all_zones(cli):
    """Remove every zone currently visible to *cli*."""
    zone_names = list_zones(cli)
    log.info("%d zones to be deleted" % len(zone_names))
    for zone_name in zone_names:
        log.info("Deleting %s", zone_name)
        delete_zone_by_name(cli, zone_name)
def create_zone_with_retry_on_duplicate(cli, digger, zn, timeout=300,
                                        dig=False):
    """Create a zone, retry when a duplicate is found,
    optionally monitor for propagation

    :param cli: DesignateCLI-like runner
    :param digger: Digger used for propagation goals (only when dig=True)
    :param zn: zone name to create
    :param timeout: seconds to keep retrying / probing
    :param dig: when True, block until the zone propagates to DNS
    :returns: dict describing the created zone
    :raises RuntimeError: 'over_quota' on quota exhaustion,
        'timeout' when the zone could not be created in time
    """
    t0 = time.time()
    timeout_time = timeout + t0
    created = False
    output = None
    while time.time() < timeout_time:
        try:
            output = cli.run_json(
                "zone create %s --email devstack@example.org" % zn)
            created = True
            log.debug(" zone created after %f" % (time.time() - t0))
            break
        except CalledProcessError as e:
            if e.output == 'Duplicate Zone':
                # A previous run still owns the name; wait and retry.
                time.sleep(1)
            elif e.output == 'over_quota':
                raise RuntimeError('over_quota')
            else:
                raise
    if not created:
        # Bug fix: the original asserted on output['serial'] BEFORE this
        # check, raising UnboundLocalError instead of a clean timeout.
        raise RuntimeError('timeout')
    assert output['serial']
    if dig:
        digger.reset_goals()
        digger.add_goal(('zone_serial_ge', zn, int(output['serial'])))
        digger.probe_resolver(timeout=timeout)
        digger.wait_on_prober()
    return output
def test_create_list_delete_loop(cli, digger, cycles_num, zn='cld.org.'):
    """Create, list, delete a zone in a loop
    Monitor for propagation time
    """
    log.info("Test zone creation, list, deletion")
    # Make sure leftovers from a previous run do not trigger duplicates.
    delete_zone_by_name(cli, zn, ignore_missing=True)
    for cycle_cnt in range(cycles_num):
        zone = create_zone_with_retry_on_duplicate(cli, digger, zn, dig=True)
        # NOTE(review): mixes the v2 `zone create` call above with legacy
        # `domain-list`/`domain-delete` commands below -- confirm both CLI
        # surfaces are available in the target environment.
        zones = cli.runcsv('domain-list')
        assert any(z['name'] == zn for z in zones), zones
        cli.run('domain-delete %s' % zone['id'])
        zones = cli.runcsv('domain-list')
        assert not any(z['name'] == zn for z in zones), zones
    log.info("done")
def test_one_big_zone(cli, digger, zone_size):
    """Create a zone with many records,
    perform CRUD on records and monitor for propagation time
    """
    t0 = time.time()
    # Random suffix keeps repeated runs from colliding on the zone name.
    zn = 'bigzone-%s.org.' % gen_random_name(12)
    delete_zone_by_name(cli, zn, ignore_missing=True)
    zone = create_zone_with_retry_on_duplicate(cli, digger, zn, dig=True)
    assert 'serial' in zone, zone
    assert 'id' in zone, zone
    try:
        digger.reset_goals()
        digger.add_goal(('zone_serial_ge', zn, int(zone['serial'])))
        digger.probe_resolver(timeout=60)
        # Fire one thread per record creation; the digger tracks when each
        # record becomes visible in DNS.
        record_creation_threads = []
        for record_num in range(zone_size):
            record_name = "rec%d" % record_num
            # Encode the record number into a unique 127.x.y.z address.
            ipaddr = "127.%d.%d.%d" % (
                (record_num >> 16) % 256,
                (record_num >> 8) % 256,
                record_num % 256,
            )
            t = Thread(target=create_and_probe_a_record,
                       args=(cli, digger, zone['id'], record_name, ipaddr))
            t.start()
            record_creation_threads.append(t)
            # Stagger creations to avoid hammering the API.
            time.sleep(.5)
        digger.wait_on_prober()
    except KeyboardInterrupt:
        log.info("Exiting on keyboard")
        raise
    finally:
        # Always stop the prober and remove the zone, even on failure.
        digger.stop_prober()
        delete_zone_by_name(cli, zone['name'])
    log.info("Done in %ds" % (time.time() - t0))
def test_servers_are_configured(cli):
    """Verify the expected devstack nameserver is the first one listed."""
    server_rows = cli.runcsv('server-list')
    assert server_rows[0]['name'] == 'ns1.devstack.org.'
    log.info("done")
def test_big_zone(args, cli, digger, tc):
    """Run the big-zone test across a matrix of simulated DNS latency
    and packet-loss settings.
    """
    log.info("Test creating many records in one big zone")
    dns_latencies_ms = (1, 100)
    dns_packet_losses = (1, 15)
    zone_size = 20
    # Cartesian product: every latency paired with every loss percentage.
    for dns_latency_ms, dns_packet_loss_perc in product(dns_latencies_ms,
                                                        dns_packet_losses):
        # Reconfigure traffic shaping before each combination.
        tc.cleanup_tc()
        tc.setup_tc(dns_latency_ms=dns_latency_ms,
                    dns_packet_loss_perc=dns_packet_loss_perc)
        log.info("Running test with DNS latency %dms packet loss %d%%" % (
            dns_latency_ms, dns_packet_loss_perc))
        test_one_big_zone(cli, digger, zone_size)
def run_tests(args, cli, digger, tc):
    """Run all integration tests
    """
    # Other scenarios are kept here disabled for manual runs.
    # test_servers_are_configured(cli)
    # test_create_list_delete_loop(cli, digger, 10)
    test_big_zone(args, cli, digger, tc)
def main():
    args = parse_args()
    loglevel = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(
        level=loglevel,
        format='%(relativeCreated)8d %(levelname)s %(funcName)20s %(message)s',
    )
    cli = DesignateCLI()
    # Raise quotas so the stress tests can create many zones/records.
    cli.setup_quota(10000)
    digger = Digger()
    # Start from a clean slate.
    delete_all_zones(cli)
    tc = TrafficControl()
    tc.cleanup_tc()
    try:
        run_tests(args, cli, digger, tc)
    finally:
        # Always restore the network configuration, even on failure.
        tc.cleanup_tc()


if __name__ == '__main__':
    sys.exit(main())

View File

@ -1,3 +0,0 @@
#!/bin/bash
# Continuously display the Linux Traffic Control configuration (qdiscs,
# classes and filters) on the loopback interface, refreshing every second.
# Companion to the tc shaping set up by networking_test.py.
IF=lo
watch -n1 "tc -p -s -d qdisc show dev $IF; echo; tc class show dev $IF; echo; tc filter show dev $IF"

View File

@ -1,61 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple mock UDP server to receive monasca-statsd traffic
Log to stdout or to a file.
"""
from argparse import ArgumentParser
import sys
from time import gmtime
from time import strftime
import SocketServer
def parse_args():
    """Build and evaluate the command-line parser for the mock server."""
    parser = ArgumentParser()
    parser.add_argument('--addr', default='127.0.0.1',
                        help='Listen IP addr (default: 127.0.0.1)')
    parser.add_argument('--port', default=8125, type=int,
                        help='UDP port (default: 8125)')
    parser.add_argument('--output-fname', default=None,
                        help='Output file (default: stdout)')
    return parser.parse_args()
class StatsdMessageHandler(SocketServer.BaseRequestHandler):
    """Log each received statsd datagram with a UTC timestamp.

    ``_output_fd`` is injected as a class attribute by main(); when it
    is falsy, messages go to stdout instead of the file.
    """

    def handle(self):
        payload = self.request[0].strip()
        stamp = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
        line = "%s %s" % (stamp, payload)
        if self._output_fd:
            self._output_fd.write(line + "\n")
        else:
            print(line)
def main():
    """Run the mock statsd UDP server until interrupted."""
    args = parse_args()
    # Append to the requested log file; None means log to stdout.
    out = None
    if args.output_fname:
        out = open(args.output_fname, 'a')
    StatsdMessageHandler._output_fd = out
    server = SocketServer.UDPServer((args.addr, args.port),
                                    StatsdMessageHandler)
    server.serve_forever()


if __name__ == "__main__":
    sys.exit(main())

View File

@ -1,195 +0,0 @@
#!/bin/bash
# Grenade resource-survival script for Designate: create a zone and a
# recordset before the upgrade, verify them afterwards, and clean up.
set -o errexit

# Quote all sourced paths so directories with spaces do not break the
# script (the originals were unquoted).
source "$GRENADE_DIR/grenaderc"
source "$GRENADE_DIR/functions"
source "$TOP_DIR/openrc" admin admin
source "$TOP_DIR/stackrc"
set -o xtrace

# Project/user credentials and DNS resources used to exercise Designate
# across the upgrade.
DESIGNATE_PROJECT=designate_grenade
DESIGNATE_USER=designate_grenade
DESIGNATE_PASS=designate_grenade
DESIGNATE_ZONE_NAME=example.com.
DESIGNATE_ZONE_EMAIL=hostmaster@example.com
DESIGNATE_RRSET_NAME=www.example.com.
DESIGNATE_RRSET_TYPE=A
DESIGNATE_RRSET_RECORD=10.0.0.1

# used with dig to look up in DNS
DIG_FLAGS="-p $DESIGNATE_SERVICE_PORT_DNS @$SERVICE_HOST"
DIG_TIMEOUT=30
# Point the OpenStack CLI environment at the grenade test user.
function _set_designate_user {
    # Set both the old (tenant) and new (project) variable names so
    # either CLI vintage picks up the right project.
    OS_USERNAME=$DESIGNATE_USER
    OS_PASSWORD=$DESIGNATE_PASS
    OS_PROJECT_NAME=$DESIGNATE_PROJECT
    OS_TENANT_NAME=$DESIGNATE_PROJECT
}
# Wait (up to $DIG_TIMEOUT seconds) until a DNS record with the given
# value is resolvable through the Designate-managed nameserver.
# Arguments: $1 record name, $2 record type, $3 expected record value.
# Dies via die() if the record never appears; no-op for the fake backend.
function _ensure_recordset_present {
    local record_name=$1
    local record_type=$2
    local record_value=$3

    if [ "$DESIGNATE_BACKEND_DRIVER" = "fake" ] ; then
        # if the backend is fake, there will be no actual DNS records
        return 0
    fi

    # Poll once per second inside `sh -c` so `timeout` can bound the
    # whole wait; grep succeeds as soon as the value shows up.
    if ! timeout $DIG_TIMEOUT sh -c "while ! dig +short $DIG_FLAGS $record_name $record_type | grep \"$record_value\"; do sleep 1; done"; then
        die $LINENO "Error: record $record_name ($record_type) not found in DNS"
    fi

    # Display for debugging
    dig $DIG_FLAGS $record_name $record_type

    return 0
}
# Create the resources that must survive the upgrade: a project, a user,
# a zone and an A recordset.  IDs are persisted with resource_save so
# verify()/destroy() can locate them after the upgrade.
function create {
    # create a tenant for the server; `-f shell -c id` emits `id="..."`
    # which eval turns into the $id variable
    eval $(openstack project create -f shell -c id $DESIGNATE_PROJECT)
    if [[ -z "$id" ]]; then
        die $LINENO "Didn't create $DESIGNATE_PROJECT project"
    fi
    resource_save designate project_id $id
    local project_id=$id

    # create the user, and set $id locally
    eval $(openstack user create $DESIGNATE_USER \
        --project $project_id \
        --password $DESIGNATE_PASS \
        -f shell -c id)
    if [[ -z "$id" ]]; then
        die $LINENO "Didn't create $DESIGNATE_USER user"
    fi
    resource_save designate user_id $id

    # BUG(sdague): this really shouldn't be required, in Keystone v2 a
    # user created in a project was assigned to that project, in v3 it
    # is not - https://bugs.launchpad.net/keystone/+bug/1662911
    openstack role add Member --user $id --project $project_id

    _set_designate_user

    # Create a zone, and save the id
    eval $(openstack zone create --email $DESIGNATE_ZONE_EMAIL \
        $DESIGNATE_ZONE_NAME \
        -f shell -c id)
    resource_save designate zone_id $id

    # Create a recordset in the zone, and save the id
    eval $(openstack recordset create --records $DESIGNATE_RRSET_RECORD \
        --type $DESIGNATE_RRSET_TYPE \
        $DESIGNATE_ZONE_NAME \
        $DESIGNATE_RRSET_NAME \
        -f shell -c id)
    resource_save designate rrset_id $id

    # wait until rrset moves to active state
    local timeleft=1000
    while [[ $timeleft -gt 0 ]]; do
        local status
        eval $(openstack recordset show $DESIGNATE_ZONE_NAME \
            $DESIGNATE_RRSET_NAME \
            -f shell -c status)
        if [[ "$status" != "ACTIVE" ]]; then
            # Fixed: this used to test "$cluster_state", a variable that
            # is never set in this script, so an ERROR recordset was
            # never detected and the loop ran the full 1000 seconds.
            if [[ "$status" == "ERROR" ]]; then
                die $LINENO "Recordset is in ERROR state"
            fi
            echo "Recordset is still not in ACTIVE state"
            sleep 10
            timeleft=$((timeleft - 10))
            if [[ $timeleft == 0 ]]; then
                die $LINENO "Recordset hasn't moved to ACTIVE state \
                    during 1000 seconds"
            fi
        else
            break
        fi
    done
}
# Post-upgrade API verification: the zone and recordset created by
# create() must still exist and report ACTIVE status via the API.
function verify {
    _set_designate_user
    # fetch the IDs saved by create() before the upgrade
    local zone_id
    zone_id=$(resource_get designate zone_id)
    local rrset_id
    rrset_id=$(resource_get designate rrset_id)
    # `-f shell -c status` emits `status="..."`; eval sets $status
    eval $(openstack zone show $zone_id -f shell -c status)
    echo -n $status
    if [[ "$status" != "ACTIVE" ]]; then
        die $LINENO "Zone is not in Active state anymore"
    fi
    eval $(openstack recordset show $zone_id $rrset_id -f shell -c status)
    echo -n $status
    if [[ "$status" != "ACTIVE" ]]; then
        die $LINENO "Recordset is not in Active state anymore"
    fi
    echo "Designate verification: SUCCESS"
}
# Post-upgrade DNS-level verification: resolve the recordset directly
# against the backend nameserver, bypassing the Designate API.
function verify_noapi {
    # Quote the expansions so empty/whitespace values are passed intact.
    _ensure_recordset_present "$DESIGNATE_RRSET_NAME" "$DESIGNATE_RRSET_TYPE" "$DESIGNATE_RRSET_RECORD"
}
# Delete the grenade zone and wait (up to 500 seconds) for the deletion
# to complete; dies if the zone is still visible afterwards.
function destroy {
    _set_designate_user
    # deletion is best-effort: keep going if individual calls fail
    set +o errexit

    # delete the zone created by create()
    # Fixed: this used to declare `local cluster_id` (copy-paste from a
    # cluster script) while assigning zone_id, leaking zone_id into the
    # global scope and leaving the local unused.
    local zone_id
    zone_id=$(resource_get designate zone_id)
    openstack zone delete $zone_id > /dev/null

    # wait for zone deletion; `zone show` exits 1 once it is gone
    local timeleft=500
    while [[ $timeleft -gt 0 ]]; do
        openstack zone show $zone_id > /dev/null
        local rc=$?
        if [[ "$rc" != 1 ]]; then
            echo "Zone still exists"
            sleep 5
            timeleft=$((timeleft - 5))
            if [[ $timeleft == 0 ]]; then
                die $LINENO "Zone hasn't been deleted during 500 seconds"
            fi
        else
            break
        fi
    done
}
# Dispatcher: grenade invokes this script with the phase name as $1
# (create / verify_noapi / verify / destroy / force_destroy).
case "$1" in
    "create")
        create
        ;;
    "verify_noapi")
        verify_noapi
        ;;
    "verify")
        verify
        ;;
    "destroy")
        destroy
        ;;
    "force_destroy")
        # like destroy, but never abort on individual command failures
        set +o errexit
        destroy
        ;;
esac

View File

@ -1,11 +0,0 @@
# Grenade settings for the Designate upgrade test: register the project,
# preserve its database across the upgrade, enable the plugin plus all
# Designate services on both sides, and skip the tempest smoke runs
# (the resource scripts do the actual checking).
register_project_for_upgrade designate
register_db_to_save designate
devstack_localrc base enable_plugin designate https://git.openstack.org/openstack/designate
devstack_localrc target enable_plugin designate https://git.openstack.org/openstack/designate
devstack_localrc base enable_service designate-api designate-central designate-producer designate-worker designate-mdns designate-agent designate-sink designate horizon
devstack_localrc target enable_service designate-api designate-central designate-producer designate-worker designate-mdns designate-agent designate-sink designate horizon
BASE_RUN_SMOKE=False
TARGET_RUN_SMOKE=False

View File

@ -1,38 +0,0 @@
#!/bin/bash
# ``shutdown-designate``
#
# Stop every Designate process started by the base DevStack and verify
# that none of them survived.
set -o errexit

# Quote all sourced paths so directories with spaces do not break the
# script (the originals were unquoted).
source "$GRENADE_DIR/grenaderc"
source "$GRENADE_DIR/functions"

# We need base DevStack functions for this
source "$BASE_DEVSTACK_DIR/functions"
source "$BASE_DEVSTACK_DIR/stackrc"  # needed for status directory
source "$BASE_DEVSTACK_DIR/lib/tls"
source "${GITDIR[designate]}/devstack/plugin.sh"

set -o xtrace

stop_process designate-central
stop_process designate-api
stop_process designate-mdns
stop_process designate-agent
stop_process designate-sink

# worker/producer replaced pool-manager/zone-manager; stop whichever
# pair this deployment has enabled.
if is_service_enabled designate-worker; then
    stop_process designate-worker
    stop_process designate-producer
else
    stop_process designate-pool-manager
    stop_process designate-zone-manager
fi

# sanity check that service is actually down
ensure_services_stopped designate-api designate-central designate-mdns designate-agent designate-sink
if is_service_enabled designate-worker; then
    ensure_services_stopped designate-worker designate-producer
else
    ensure_services_stopped designate-pool-manager designate-zone-manager
fi

View File

@ -1,96 +0,0 @@
#!/usr/bin/env bash
# ``upgrade-designate``
#
# Grenade upgrade hook: install the target-release Designate on top of
# the base deployment, migrate the database and restart the services.
echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"
# Clean up any resources that may be in use
cleanup() {
    set +o errexit
    echo "********************************************************************"
    echo "ERROR: Abort $0"
    echo "********************************************************************"
    # Kill ourselves to signal any calling process
    # (resetting the INT trap first so the kill is not caught here)
    trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)
# Source params
source $GRENADE_DIR/grenaderc
# Import common functions
source $GRENADE_DIR/functions
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Upgrade designate
# ============
# Get functions from current DevStack
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls
# plugin.sh/settings live one directory above this script in the
# designate devstack plugin tree
source $(dirname $(dirname $BASH_SOURCE))/plugin.sh
source $(dirname $(dirname $BASH_SOURCE))/settings
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following allowing as the install occurs.
set -o xtrace
# Save current config files for posterity
[[ -d $SAVE_DIR/etc.designate ]] || cp -pr $DESIGNATE_CONF_DIR $SAVE_DIR/etc.designate
# install_designate()
# distro packages needed by designate (setcap/dig helpers)
if is_ubuntu; then
    install_package libcap2-bin
elif is_fedora; then
    # bind-utils package provides `dig`
    install_package libcap bind-utils
fi
# check out and install the target-release designate from source
git_clone $DESIGNATE_REPO $DESIGNATE_DIR $DESIGNATE_BRANCH
setup_develop $DESIGNATE_DIR
install_designateclient
# calls upgrade-designate for specific release
upgrade_project designate $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
# Migrate the database
$DESIGNATE_BIN_DIR/designate-manage --config-file $DESIGNATE_CONF \
    database sync || die $LINENO "DB sync error"
# Start designate
run_process designate-central "$DESIGNATE_BIN_DIR/designate-central --config-file $DESIGNATE_CONF"
run_process designate-api "$DESIGNATE_BIN_DIR/designate-api --config-file $DESIGNATE_CONF"
run_process designate-producer "$DESIGNATE_BIN_DIR/designate-producer --config-file $DESIGNATE_CONF"
run_process designate-worker "$DESIGNATE_BIN_DIR/designate-worker --config-file $DESIGNATE_CONF"
run_process designate-mdns "$DESIGNATE_BIN_DIR/designate-mdns --config-file $DESIGNATE_CONF"
run_process designate-agent "$DESIGNATE_BIN_DIR/designate-agent --config-file $DESIGNATE_CONF"
run_process designate-sink "$DESIGNATE_BIN_DIR/designate-sink --config-file $DESIGNATE_CONF"
# Start proxies if enabled
if is_service_enabled designate-api && is_service_enabled tls-proxy; then
    start_tls_proxy '*' $DESIGNATE_SERVICE_PORT $DESIGNATE_SERVICE_HOST $DESIGNATE_SERVICE_PORT_INT &
fi
# Poll the API endpoint until it answers, bounded by $SERVICE_TIMEOUT
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT; do sleep 1; done"; then
    die $LINENO "Designate did not start"
fi
# Don't succeed unless the service come up
ensure_services_started designate-api designate-central designate-producer designate-worker designate-mdns designate-agent designate-sink
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"