Support heterogeneous OVB environments

This adds support for heterogeneous OVB environments to the
te-broker.  It is primarily intended for scale testing jobs, since
the normal test jobs only deploy a single compute node.  We wouldn't
gain much by using a smaller flavor for that one VM, and setting up
the heterogeneous environment has a complexity cost.

Right now this only works for jobs that deploy just control and
compute nodes.  Support for a third role type, such as ceph, could
be added in a similar fashion.

Change-Id: I398d13356b3c15c0c7cd448366186b7589ad93e4
Ben Nemec 2017-04-20 20:47:30 +00:00
parent 3da2845ebf
commit 722f247838
4 changed files with 74 additions and 24 deletions
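
As a usage sketch, a scale job would request a heterogeneous environment by
passing the new option alongside the existing node count. Only --envsize and
--compute-envsize come from this change; the client script name and the other
options a real job needs are assumptions and are left out here:

# Hypothetical invocation (client script name assumed): 3 full-size nodes
# tagged with the controller profile plus 30 smaller compute nodes.
./testenv-client --envsize 3 --compute-envsize 30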

View File

@@ -8,9 +8,12 @@ CREATE_UNDERCLOUD=${4:-""}
SSH_KEY=${5:-""}
# We recognize 3 values for NETISO: none, multi-nic, public-bond
NETISO=${6:-'multi-nic'}
COMPUTE_NODECOUNT=${7:-0}
PROVISIONNET=provision-${ENVNUM}
PUBLICNET=public-${ENVNUM}
ENVFILE=env-${ENVNUM}.yaml
ENVFILE=env-${ENVNUM}-base.yaml
COMPUTE_ENVFILE=env-${ENVNUM}-compute.yaml
ROLE_ARGS=
source /etc/nodepoolrc
@@ -45,22 +48,45 @@ fi
echo 'parameter_defaults:' >> $ENVFILE
echo ' bmc_use_cache: True' >> $ENVFILE
if [ $COMPUTE_NODECOUNT -gt 0 ]; then
echo 'parameter_defaults:' >> $ENVFILE
echo ' role: control' >> $ENVFILE
fi
echo -e 'resource_registry:\n' >> $ENVFILE
if [ -z "$CREATE_UNDERCLOUD" ]; then
echo ' OS::OVB::UndercloudEnvironment: OS::Heat::None' >> $ENVFILE
fi
echo ' OS::OVB::UndercloudFloating: templates/undercloud-floating-none.yaml' >> $ENVFILE
if [ $NETISO == 'multi-nic' ]; then
echo ' OS::OVB::BaremetalNetworks: templates/baremetal-networks-all.yaml' >> $ENVFILE
echo ' OS::OVB::BaremetalPorts: templates/baremetal-ports-all.yaml' >> $ENVFILE
elif [ $NETISO == 'public-bond' ]; then
echo ' OS::OVB::BaremetalNetworks: templates/baremetal-networks-all.yaml' >> $ENVFILE
echo ' OS::OVB::BaremetalPorts: templates/baremetal-ports-public-bond.yaml' >> $ENVFILE
function add_net_iso_overrides() {
local outfile=$1
if [ $NETISO == 'multi-nic' ]; then
echo ' OS::OVB::BaremetalNetworks: templates/baremetal-networks-all.yaml' >> $outfile
echo ' OS::OVB::BaremetalPorts: templates/baremetal-ports-all.yaml' >> $outfile
elif [ $NETISO == 'public-bond' ]; then
echo ' OS::OVB::BaremetalNetworks: templates/baremetal-networks-all.yaml' >> $outfile
echo ' OS::OVB::BaremetalPorts: templates/baremetal-ports-public-bond.yaml' >> $outfile
else
echo ' OS::OVB::BaremetalPorts: templates/baremetal-ports-default.yaml' >> $outfile
fi
}
add_net_iso_overrides $ENVFILE
if [ $COMPUTE_NODECOUNT -gt 0 ]; then
/bin/cp --remove-destination templates/env-role.yaml.example $COMPUTE_ENVFILE
sed -i -e "s/baremetal_flavor:.*/baremetal_flavor: baremetal-compute/" $COMPUTE_ENVFILE
sed -i -e "s/key_name:.*/key_name: ${KEY_NAME}/" $COMPUTE_ENVFILE
sed -i -e "s/node_count:.*/node_count: ${COMPUTE_NODECOUNT}/" $COMPUTE_ENVFILE
# This is already the default in the template, but just to be safe.
sed -i -e "s/role:.*/role: compute/" $COMPUTE_ENVFILE
echo 'resource_registry:' >> $COMPUTE_ENVFILE
add_net_iso_overrides $COMPUTE_ENVFILE
ROLE_ARGS="--role $COMPUTE_ENVFILE"
fi
/opt/stack/openstack-virtual-baremetal/bin/deploy.py --env $ENVFILE --name baremetal_${ENVNUM} --quintupleo --id ${ENVNUM}
/opt/stack/openstack-virtual-baremetal/bin/deploy.py --env $ENVFILE --name baremetal_${ENVNUM} --quintupleo --id ${ENVNUM} $ROLE_ARGS
while ! heat stack-show baremetal_${ENVNUM} | grep CREATE_COMPLETE ; do
sleep 10
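
For clarity, here is a hedged sketch of what the compute branch produces for
ENVNUM=1 with the default multi-nic NETISO and COMPUTE_NODECOUNT=30. The keys
come from the sed patterns and add_net_iso_overrides above; it assumes
templates/env-role.yaml.example carries at least these keys (anything else in
it would be left untouched) and that KEY_NAME matches the per-environment
keypair name used in the teardown script:

# Hypothetical env-1-compute.yaml after the sed edits above
cat > env-1-compute.yaml <<'EOF'
parameter_defaults:
  baremetal_flavor: baremetal-compute
  key_name: tripleo-ci-key-1  # assumes KEY_NAME matches the keypair name
  node_count: 30
  role: compute
resource_registry:
  OS::OVB::BaremetalNetworks: templates/baremetal-networks-all.yaml
  OS::OVB::BaremetalPorts: templates/baremetal-ports-all.yaml
EOF
# The role file then rides along on the otherwise unchanged deploy call:
/opt/stack/openstack-virtual-baremetal/bin/deploy.py --env env-1-base.yaml \
    --name baremetal_1 --quintupleo --id 1 --role env-1-compute.yaml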
@@ -78,5 +104,5 @@ if [ $NETISO != 'none' ]; then
fi
# This writes out the env file as env-ID.yaml, which overwrites the one we have created
/opt/stack/openstack-virtual-baremetal/bin/build-nodes-json --env $ENVFILE \
/opt/stack/openstack-virtual-baremetal/bin/build-nodes-json --env env-${ENVNUM}.yaml \
--nodes_json ${TE_DATAFILE:-~/instackenv.json} $BUILD_NODES_JSON_EXTRA_ARGS

View File

@@ -4,9 +4,12 @@ set -xe
ENVNUM=${1:-$(date +%s)}
PROVISIONNET=provision-${ENVNUM}
PUBLICNET=public-${ENVNUM}
ENVFILE=env-${ENVNUM}.yaml
ENVFILE=env-${ENVNUM}-base.yaml
COMPUTE_ENVFILE=env-${ENVNUM}-compute.yaml
rm -f /opt/stack/openstack-virtual-baremetal/$ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/env-${ENVNUM}.yaml
rm -f /opt/stack/openstack-virtual-baremetal/$COMPUTE_ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/temp-key-$ENVNUM.pub
source /etc/nodepoolrc
@@ -38,16 +41,27 @@ wait
# If there was a keypair for this specific run, delete it.
openstack keypair delete "tripleo-ci-key-$ENVNUM" || true
# NOTE(bnemec): I'm periodically seeing the stack-delete fail to connect to
# Heat. It looks like a transient network issue, so let's just retry when it happens.
for i in $(seq 10); do
heat stack-delete -y baremetal_${ENVNUM} && break
sleep 5
done
while heat stack-show baremetal_${ENVNUM} 2>&1 > /dev/null ; do
# If the delete failed, try again
if heat stack-show baremetal_${ENVNUM} | grep DELETE_FAILED ; then
heat stack-delete -y baremetal_${ENVNUM} || true
function delete_stack() {
local stackname=$1
# Nothing to do if the specified stack doesn't exist
if ! heat stack-show $stackname; then
return 0
fi
sleep 20
done
# NOTE(bnemec): I'm periodically seeing the stack-delete fail to connect to
# Heat. It looks like a transient network issue, so let's just retry when it happens.
for i in $(seq 10); do
heat stack-delete -y $stackname && break
sleep 5
done
while heat stack-show $stackname 2>&1 > /dev/null ; do
# If the delete failed, try again
if heat stack-show $stackname | grep DELETE_FAILED ; then
heat stack-delete -y $stackname || true
fi
sleep 20
done
}
# Extra role stacks must be deleted first
delete_stack baremetal_${ENVNUM}-compute
delete_stack baremetal_${ENVNUM}
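
As the comment above says, the extra role stack has to be removed before the
primary one. A quick manual check after teardown, assuming environment 1 and
the same heat CLI the script already relies on:

# Hypothetical post-teardown check; both lookups should fail once
# delete_stack has finished for each stack.
heat stack-show baremetal_1-compute || echo "compute role stack gone"
heat stack-show baremetal_1 || echo "primary stack gone"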

View File

@@ -138,7 +138,9 @@ class TEWorkerThread(threading.Thread):
arguments.get("ucinstance",""),
arguments.get("create_undercloud", ""),
arguments.get("ssh_key", ""),
arguments.get("net_iso", "multi-nic")],
arguments.get("net_iso", "multi-nic"),
arguments.get("compute_envsize","0"),
],
stderr=subprocess.STDOUT))
clientdata = fp.read()
except subprocess.CalledProcessError as e:

View File

@@ -150,6 +150,13 @@ def main(args=sys.argv[1:]):
'be killed.')
parser.add_argument('--envsize', default="2",
help='Number of baremetal nodes to request')
parser.add_argument('--compute-envsize', default='0',
help='Number of compute baremetal nodes to request. '
'When this is set to a value > 0, the primary '
'nodes will be tagged with the controller '
'profile and the extra nodes with compute. The '
'compute nodes will be a smaller flavor in order '
'to use fewer resources.')
parser.add_argument('--ucinstance',
help='uuid for the undercloud instance (where an '
'interface on the provisioning net is attached')
@@ -186,6 +193,7 @@
"callback_name": callback_name,
"timeout": opts.timeout,
"envsize":opts.envsize,
"compute_envsize":opts.compute_envsize,
"ucinstance":opts.ucinstance,
"create_undercloud": "true" if opts.create_undercloud else "",
"ssh_key":opts.ssh_key,