diff --git a/.zuul.yaml b/.zuul.yaml
index 065293255..4a141f860 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -15,6 +15,8 @@
         - openstack-tox-functional
         - openstack-tox-functional-py36
         - placement-nova-tox-functional-py36
+        - placement-nested-perfload:
+            voting: false
         - placement-perfload:
             voting: false
         - tempest-full-py3:
@@ -74,3 +76,11 @@
       - ^tox.ini$
     run: playbooks/perfload.yaml
     post-run: playbooks/post.yaml
+
+- job:
+    name: placement-nested-perfload
+    parent: placement-perfload
+    description: |
+        A simple node on which to run placement with the barest of configs
+        and make nested performance related tests against it.
+    run: playbooks/nested-perfload.yaml
diff --git a/gate/README b/gate/README
index b7df20654..75b88ac43 100644
--- a/gate/README
+++ b/gate/README
@@ -1,4 +1,14 @@
-These are hooks to be used by the OpenStack infra test system. These scripts
-may be called by certain jobs at important times to do extra testing, setup,
-etc. They are really only relevant within the scope of the OpenStack infra
-system and are not expected to be useful to anyone else.
+This directory contains files used by the OpenStack infra test system. They
+are really only relevant within the scope of the OpenStack infra system and
+are not expected to be useful to anyone else.
+
+These files are a mixture of:
+
+* Hooks and other scripts used by the OpenStack infra test system. These
+  scripts may be called by certain jobs at important times to do extra
+  testing, perform setup, run services, etc.
+
+* "Gabbits": test files used with some of the jobs described in .zuul.yaml
+  and the playbooks. When changes are made to the gabbits or the playbooks,
+  it is quite likely that the queries in the playbooks or the assertions in
+  the gabbits will need to be updated.
diff --git a/gate/gabbits/nested-perfload.yaml b/gate/gabbits/nested-perfload.yaml
new file mode 100644
index 000000000..5e79bc75a
--- /dev/null
+++ b/gate/gabbits/nested-perfload.yaml
@@ -0,0 +1,83 @@
+# This is a single compute node with two NUMA nodes, to show some
+# simple nesting.
+#
+# This should be updated to represent something closer to a real and
+# expected nested topology. If changes made here alter the total
+# number of resource providers, then $PROVIDER_TOPOLOGY_COUNT in
+# gate/perfload-nested-runner.sh should be updated to match.
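+#
+# As a sketch (derived from the inventories and traits set in the
+# tests below), each run of this file creates the topology:
+#
+#     $CN_UUID (DISK_GB; trait COMPUTE_VOLUME_MULTI_ATTACH)
+#     |-- numa 1-$N1_UUID (VCPU, MEMORY_MB; trait HW_CPU_X86_AVX2)
+#     '-- numa 2-$N2_UUID (VCPU, MEMORY_MB; trait HW_CPU_X86_SSE)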
+
+defaults:
+    request_headers:
+        accept: application/json
+        content-type: application/json
+        openstack-api-version: placement latest
+        x-auth-token: $ENVIRON['TOKEN']
+
+tests:
+
+    - name: create one compute node
+      POST: /resource_providers
+      data:
+          uuid: $ENVIRON['CN_UUID']
+          name: $ENVIRON['CN_UUID']
+
+    - name: set compute node inventory
+      PUT: /resource_providers/$ENVIRON['CN_UUID']/inventories
+      data:
+          resource_provider_generation: 0
+          inventories:
+              DISK_GB:
+                  total: 20480
+
+    - name: set compute node traits
+      PUT: /resource_providers/$ENVIRON['CN_UUID']/traits
+      data:
+          resource_provider_generation: 1
+          traits:
+              - COMPUTE_VOLUME_MULTI_ATTACH
+
+    - name: create numa 1
+      POST: /resource_providers
+      data:
+          uuid: $ENVIRON['N1_UUID']
+          name: numa 1-$ENVIRON['N1_UUID']
+          parent_provider_uuid: $ENVIRON['CN_UUID']
+
+    - name: set numa 1 inventory
+      PUT: /resource_providers/$ENVIRON['N1_UUID']/inventories
+      data:
+          resource_provider_generation: 0
+          inventories:
+              VCPU:
+                  total: 16
+              MEMORY_MB:
+                  total: 16777216
+
+    - name: set numa 1 traits
+      PUT: /resource_providers/$ENVIRON['N1_UUID']/traits
+      data:
+          resource_provider_generation: 1
+          traits:
+              - HW_CPU_X86_AVX2
+
+    - name: create numa 2
+      POST: /resource_providers
+      data:
+          uuid: $ENVIRON['N2_UUID']
+          name: numa 2-$ENVIRON['N2_UUID']
+          parent_provider_uuid: $ENVIRON['CN_UUID']
+
+    - name: set numa 2 inventory
+      PUT: /resource_providers/$ENVIRON['N2_UUID']/inventories
+      data:
+          resource_provider_generation: 0
+          inventories:
+              VCPU:
+                  total: 16
+              MEMORY_MB:
+                  total: 16777216
+
+    - name: set numa 2 traits
+      PUT: /resource_providers/$ENVIRON['N2_UUID']/traits
+      data:
+          resource_provider_generation: 1
+          traits:
+              - HW_CPU_X86_SSE
diff --git a/gate/perfload-nested-loader.sh b/gate/perfload-nested-loader.sh
new file mode 100755
index 000000000..0ef677235
--- /dev/null
+++ b/gate/perfload-nested-loader.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Auto-export every variable set below so that gabbi-run can see them
+# via the $ENVIRON substitutions in the gabbit.
+set -a
+HOST=$1
+GABBIT=$2
+
+# By default the placement server is set up with noauth2 authentication
+# handling. If that is changed to keystone, a $TOKEN can be generated in
+# the calling environment and used instead of the default 'admin'.
+TOKEN=${TOKEN:-admin}
+
+# These are the dynamic/unique values for individual resource providers
+# that need to be set for each run of a gabbi file. Values that are the
+# same for all the resource providers (for example, traits and inventory)
+# should be set in $GABBIT.
+CN_UUID=$(uuidgen)
+N1_UUID=$(uuidgen)
+N2_UUID=$(uuidgen)
+
+# Run gabbi silently.
+gabbi-run -q $HOST -- $GABBIT
diff --git a/gate/perfload-nested-runner.sh b/gate/perfload-nested-runner.sh
new file mode 100755
index 000000000..c040b043a
--- /dev/null
+++ b/gate/perfload-nested-runner.sh
@@ -0,0 +1,94 @@
+#!/bin/bash -x
+WORK_DIR=$1
+
+PLACEMENT_URL="http://localhost:8000"
+LOG=placement-perf.txt
+LOG_DEST=${WORK_DIR}/logs
+# The gabbit used to create one nested provider tree. It takes
+# inputs from LOADER to make each tree unique.
+GABBIT=gate/gabbits/nested-perfload.yaml
+LOADER=gate/perfload-nested-loader.sh
+
+# The query used to get a list of allocation candidates: the unnumbered
+# group asks for DISK_GB and $TRAIT from the compute node, the numbered
+# group asks for VCPU, MEMORY_MB and $TRAIT1 from a NUMA child. If
+# $GABBIT is changed, this may need to change.
+TRAIT="COMPUTE_VOLUME_MULTI_ATTACH"
+TRAIT1="HW_CPU_X86_AVX2"
+PLACEMENT_QUERY="resources=DISK_GB:10&resources1=VCPU:1,MEMORY_MB:256&required=${TRAIT}&required1=${TRAIT1}&group_policy=isolate"
+
+# Number of nested trees to create.
+ITERATIONS=1000
+
+# Number of times to write an allocation and then time the allocation
+# candidates query again.
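+# Each write claims one of the returned allocation requests (see
+# write_allocation below), consuming a little capacity, so successive
+# timings run against an increasingly allocated set of providers.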
+ALLOCATIONS_TO_WRITE=10
+
+# The number of providers in each nested tree. This will need to
+# change whenever the resource provider topology created in $GABBIT
+# is changed.
+PROVIDER_TOPOLOGY_COUNT=3
+# Expected total number of providers, used to check that creation
+# was a success.
+TOTAL_PROVIDER_COUNT=$((ITERATIONS * PROVIDER_TOPOLOGY_COUNT))
+
+trap "sudo cp -p $LOG $LOG_DEST" EXIT
+
+function time_candidates {
+    (
+        echo "##### TIMING GET /allocation_candidates?${PLACEMENT_QUERY} twice"
+        time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
+        time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
+    ) 2>&1 | tee -a $LOG
+}
+
+function write_allocation {
+    # Take the first allocation request and send it back as a well-formed
+    # allocation, under a new consumer, project and user.
+    curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}&limit=5" \
+        | jq --arg proj $(uuidgen) --arg user $(uuidgen) '.allocation_requests[0] + {consumer_generation: null, project_id: $proj, user_id: $user}' \
+        | curl -s -H 'x-auth-token: admin' -H 'content-type: application/json' -H 'openstack-api-version: placement latest' \
+               -X PUT -d @- "${PLACEMENT_URL}/allocations/$(uuidgen)"
+}
+
+function load_candidates {
+    time_candidates
+    for iter in $(seq 1 $ALLOCATIONS_TO_WRITE); do
+        echo "##### Writing allocation ${iter}" | tee -a $LOG
+        write_allocation
+        time_candidates
+    done
+}
+
+function check_placement {
+    local rp_count
+    local code
+    code=0
+
+    python -m virtualenv -p python3 .perfload
+    . .perfload/bin/activate
+
+    # Install gabbi, which $LOADER uses to create the providers.
+    pip install gabbi
+
+    # Create $ITERATIONS nested resource provider trees, each tree
+    # having $PROVIDER_TOPOLOGY_COUNT resource providers, for a total
+    # of $TOTAL_PROVIDER_COUNT providers. LOADER is called $ITERATIONS
+    # times, running up to 3 * the number of processors on the host in
+    # parallel (parallel -P 300%).
+    echo "##### Creating $TOTAL_PROVIDER_COUNT providers" | tee -a $LOG
+    seq 1 $ITERATIONS | parallel -P 300% $LOADER $PLACEMENT_URL $GABBIT
+
+    set +x
+    rp_count=$(curl -H 'x-auth-token: admin' ${PLACEMENT_URL}/resource_providers |json_pp|grep -c '"name"')
+    # Skip the timing runs and note failure if we did not create the
+    # required number of resource providers.
+    if [[ $rp_count -ge $TOTAL_PROVIDER_COUNT ]]; then
+        load_candidates
+    else
+        (
+            echo "Unable to create expected number of resource providers. Expected: ${TOTAL_PROVIDER_COUNT}, Got: $rp_count"
+            echo "See job-output.txt.gz and logs/screen-placement-api.txt.gz for additional detail."
+        ) | tee -a $LOG
+        code=1
+    fi
+    set -x
+    deactivate
+    exit $code
+}
+
+check_placement
diff --git a/gate/perfload-server.sh b/gate/perfload-server.sh
new file mode 100755
index 000000000..ddd27faba
--- /dev/null
+++ b/gate/perfload-server.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -x
+
+WORK_DIR=$1
+
+# create database
+sudo debconf-set-selections <