[ci] Remove redundant rally task job files

These files were left over after the migration of OpenStack plugins to a
separate repo.

Also, this patch fixes the unit test for the FileExistsValidator, which
used the rally-jobs dir for no reason.

Change-Id: I9a4b35643907ce0f101607314aa70222a883368c
Andrey Kurilin 2020-03-22 20:49:58 +02:00
parent 8dbad440db
commit 8dbfa86d02
12 changed files with 2 additions and 3208 deletions


@@ -1,40 +0,0 @@
Rally job related files
=======================
This directory contains rally tasks and plugins that are run by OpenStack CI.
Structure
---------
* plugins - directory where you can add rally plugins. Almost everything in
Rally is a plugin: task contexts, scenarios, SLAs, hooks, generic
cleanup resources, and so on.
* extra - all files from this directory are copied to the gate nodes, so you
can use absolute paths in rally tasks.
Files will be located in ~/.rally/extra/*
* rally.yaml is a task that is run in gates against OpenStack (nova network)
* rally-neutron.yaml is a task that is run in gates against OpenStack with
Neutron Service
* rally-designate.yaml is a task that is run in gates against OpenStack with
the Designate service. It's an experimental job; to trigger it, comment on a
review with the text "check experimental".
* rally-zaqar.yaml is a task that is run in gates against OpenStack with
the Zaqar service. It's an experimental job; to trigger it, comment on a
review with the text "check experimental".
Useful links
------------
* More about Rally: https://rally.readthedocs.org/en/latest/
* How to add rally-gates: http://rally.readthedocs.io/en/latest/quick_start/gates.html
* About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html
* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
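For reference, a plugin of the kind that used to live in the plugins/ directory
is just a Python class registered through Rally's decorators. The sketch below
is a minimal, hypothetical example (the DummyPlugin name and its body are
assumptions, not code from the removed files):

    from rally.task import scenario


    @scenario.configure(name="DummyPlugin.do_nothing")
    class DoNothing(scenario.Scenario):
        """A no-op scenario; whatever run() does is timed as one iteration."""

        def run(self):
            # A real plugin would exercise the code under test here.
            pass

Dropping such a module into plugins/ was enough for the gate job to pick it up,
since that directory is copied to a location Rally scans for plugins
(typically ~/.rally/plugins).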


@@ -1,11 +0,0 @@
Extra files
===========
All files from this directory are copied to the gate nodes, so you can use
absolute paths in rally tasks. Files will be in ~/.rally/extra/*
murano/ directory
-----------------
Here we have Murano applications that are used to prepare the Murano context
and to deploy the environment.


@@ -1,56 +0,0 @@
#!/bin/sh
rand_int() {
od -An -tu -N1 /dev/urandom | tr -d ' '
}
cat << EOF
{
"additive": [
{
"title": "Statistics table from Hook",
"chart_plugin": "StatsTable",
"data": [
["Alice", $(rand_int)],
["Bob", $(rand_int)],
["Carol", $(rand_int)]]
},
{
"title": "StackedArea chart from Hook",
"description": "This is generated by ${0}",
"chart_plugin": "StackedArea",
"data": [
["Alpha", $(rand_int)],
["Beta", $(rand_int)],
["Gamma", $(rand_int)]]
}
],
"complete": [
{
"title": "Lines chart from Hook",
"description": "Random data generated by ${0}",
"chart_plugin": "Lines",
"axis_label": "X-axis label",
"label": "Y-axis label",
"data": [
["Foo", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]],
["Bar", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]],
["Spam", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]],
["Quiz", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]]
]
},
{
"title": "Pie chart from Hook",
"description": "Yet another data generated by ${0}",
"chart_plugin": "Pie",
"data": [
["Cat", $(rand_int)],
["Tiger", $(rand_int)],
["Jaguar", $(rand_int)],
["Panther", $(rand_int)],
["Lynx", $(rand_int)]
]
}
]
}
EOF
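The script above is the sort of hook that feeds Rally's report charts: it
prints a JSON document with "additive" chart data (which Rally aggregates) and
"complete" chart data (which Rally renders as-is for each run). The Python
sketch below is illustrative only, not part of the removed files; it builds the
same structure and may make the expected format easier to see:

    import json
    import random


    def rand_int():
        # Mirrors the shell helper above: one pseudo-random byte value.
        return random.randint(0, 255)


    hook_output = {
        "additive": [{
            "title": "Statistics table from Hook",
            "chart_plugin": "StatsTable",
            "data": [["Alice", rand_int()],
                     ["Bob", rand_int()],
                     ["Carol", rand_int()]],
        }],
        "complete": [{
            "title": "Lines chart from Hook",
            "chart_plugin": "Lines",
            "axis_label": "X-axis label",
            "label": "Y-axis label",
            "data": [["Foo", [[x, rand_int()] for x in range(1, 6)]]],
        }],
    }

    print(json.dumps(hook_output, indent=2))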


@@ -1,24 +0,0 @@
#!/bin/sh
set -e
main() {
cat > ~/dd_test.sh <<'EOF'
#!/bin/sh
time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
file=/tmp/test.img
c=1000 #1GB
write_seq_1gb=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
read_seq_1gb=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
[ -f $file ] && rm $file
echo "{
\"write_seq_1gb\": $write_seq_1gb,
\"read_seq_1gb\": $read_seq_1gb
}"
EOF
chmod a+x ~/dd_test.sh
}
main


@@ -1,107 +0,0 @@
#!/bin/sh
# Load server and output JSON results ready to be processed
# by Rally scenario
for ex in awk top grep free tr df dc dd gzip
do
if ! type ${ex} >/dev/null
then
echo "Executable is required by script but not available on a server: ${ex}" >&2
return 1
fi
done
get_used_cpu_percent() {
echo 100 $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %) - p | dc
}
get_used_ram_percent() {
local total=$(free | grep Mem: | awk '{print $2}')
local used=$(free | grep -- -/+\ buffers | awk '{print $3}')
echo ${used} 100 \* ${total} / p | dc
}
get_used_disk_percent() {
df -P / | grep -v Filesystem | awk '{print $5}' | tr -d %
}
get_seconds() {
(time -p ${1}) 2>&1 | awk '/real/{print $2}'
}
complete_load() {
local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh}
local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop}
local processes_num=${LOAD_PROCESSES_COUNT:-20}
local size=${LOAD_SIZE_MB:-5}
cat << EOF > ${script_file}
until test -e ${stop_file}
do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done
EOF
local sep
local cpu
local ram
local dis
rm -f ${stop_file}
for i in $(seq ${processes_num})
do
i=$((i-1))
sh ${script_file} &
cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]"
ram="${ram}${sep}[${i}, $(get_used_ram_percent)]"
dis="${dis}${sep}[${i}, $(get_used_disk_percent)]"
sep=", "
done
> ${stop_file}
cat << EOF
{
"title": "Generate load by spawning processes",
"description": "Each process runs gzip for ${size}M urandom data in a loop",
"chart_plugin": "Lines",
"axis_label": "Number of processes",
"label": "Usage, %",
"data": [
["CPU", [${cpu}]],
["Memory", [${ram}]],
["Disk", [${dis}]]]
}
EOF
}
additive_dd() {
local c=${1:-50} # Megabytes
local file=/tmp/dd_test.img
local write=$(get_seconds "dd if=/dev/urandom of=${file} bs=1M count=${c}")
local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}")
local gzip=$(get_seconds "gzip ${file}")
rm ${file}.gz
cat << EOF
{
"title": "Write, read and gzip file",
"description": "Using file '${file}', size ${c}Mb.",
"chart_plugin": "StackedArea",
"data": [
["write_${c}M", ${write}],
["read_${c}M", ${read}],
["gzip_${c}M", ${gzip}]]
},
{
"title": "Statistics for write/read/gzip",
"chart_plugin": "StatsTable",
"data": [
["write_${c}M", ${write}],
["read_${c}M", ${read}],
["gzip_${c}M", ${gzip}]]
}
EOF
}
cat << EOF
{
"additive": [$(additive_dd)],
"complete": [$(complete_load)]
}
EOF

File diff suppressed because it is too large


@@ -1,128 +0,0 @@
{% set flavor_name = "m1.tiny" %}
{% set image_name = "^cirros.*-disk$" %}
{% set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %}
{% set smoke = 0 %}
---
version: 2
title: rally-neutron-existing-users.yaml
description: >
The task contains various scenarios that do not require an admin user
subtasks:
-
title: Test main Cinder actions
workloads:
-
scenario:
CinderVolumes.create_volume:
size: 1
runner:
constant:
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
-
scenario:
CinderVolumes.create_volume:
size: 1
image:
name: {{image_name}}
runner:
constant:
times: 1
concurrency: 1
sla:
failure_rate:
max: 0
-
scenario:
CinderVolumes.create_snapshot_and_attach_volume:
volume_type: "lvmdriver-1"
size:
min: 1
max: 1
image:
name: {{image_name}}
flavor:
name: {{flavor_name}}
runner:
constant:
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
-
title: Test main Nova actions
workloads:
-
scenario:
NovaServers.boot_and_list_server:
flavor:
name: {{flavor_name}}
image:
name: {{image_name}}
detailed: True
runner:
constant:
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
-
title: Test main Glance actions
workloads:
-
scenario:
GlanceImages.create_and_delete_image:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
runner:
constant:
times: 1
concurrency: 1
sla:
failure_rate:
max: 100
-
title: Test main Neutron actions
workloads:
-
scenario:
NeutronNetworks.create_and_list_networks:
network_create_args:
runner:
constant:
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
-
scenario:
NeutronNetworks.create_and_list_subnets:
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
runner:
constant:
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
-
scenario:
NeutronNetworks.create_and_list_floating_ips:
floating_network: "public"
floating_ip_args: {}
runner:
constant:
times: 2
concurrency: 2
sla:
failure_rate:
max: 0


@@ -1,817 +0,0 @@
{% set image_name = "^cirros.*-disk$" %}
{% set flavor_name = "m1.tiny" %}
{% set smoke = 0 %}
---
NeutronNetworks.create_and_list_networks:
-
args:
network_create_args: {}
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 20
-
args:
network_create_args:
provider:network_type: "vxlan"
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
roles:
- "admin"
sla:
failure_rate:
max: 20
NeutronNetworks.set_and_clear_router_gateway:
-
args:
network_create_args:
router:external: True
router_create_args: {}
runner:
type: "constant"
times: 4
concurrency: 2
context:
network: {}
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
router: -1
roles:
- "admin"
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_show_network:
-
args:
network_create_args: {}
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_list_subnets:
-
args:
network_create_args:
subnet_create_args:
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_show_subnets:
-
args:
network_create_args:
subnet_create_args:
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 20
NeutronSecurityGroup.create_and_list_security_groups:
-
args:
security_group_create_args: {}
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
security_group: -1
sla:
failure_rate:
max: 20
NeutronSecurityGroup.create_and_show_security_group:
-
args:
security_group_create_args: {}
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
security_group: -1
sla:
failure_rate:
max: 20
NeutronSecurityGroup.create_and_delete_security_groups:
-
args:
security_group_create_args: {}
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
security_group: -1
sla:
failure_rate:
max: 20
NeutronSecurityGroup.create_and_update_security_groups:
-
args:
security_group_create_args: {}
security_group_update_args: {}
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
security_group: -1
sla:
failure_rate:
max: 20
NeutronSecurityGroup.create_and_list_security_group_rules:
-
args:
security_group_args: {}
security_group_rule_args: {}
runner:
type: "constant"
times: {{smoke or 8 }}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
security_group: -1
sla:
failure_rate:
max: 20
NeutronSecurityGroup.create_and_show_security_group_rule:
-
args:
security_group_args: {}
security_group_rule_args: {}
runner:
type: "constant"
times: 8
concurrency: 4
context:
users:
tenants: 2
users_per_tenant: 1
quotas:
neutron:
security_group: -1
sla:
failure_rate:
max: 0
NeutronSecurityGroup.create_and_delete_security_group_rule:
-
args:
security_group_args: {}
security_group_rule_args: {}
runner:
type: "constant"
times: 4
concurrency: 4
context:
users:
tenants: 2
users_per_tenant: 1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_list_floating_ips:
-
args:
floating_network: "public"
floating_ip_args: {}
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
floatingip: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_list_routers:
-
args:
network_create_args:
subnet_create_args:
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args:
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_show_routers:
-
args:
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
runner:
type: "constant"
times: 4
concurrency: 2
context:
network: {}
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
subnet: -1
router: -1
NeutronNetworks.create_and_list_ports:
-
args:
network_create_args:
port_create_args:
ports_per_network: 4
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
router: -1
port: -1
sla:
failure_rate:
max: 20
NeutronNetworks.list_agents:
-
args:
agent_args: {}
runner:
type: "constant"
times: {{smoke or 4}}
concurrency: {{smoke or 2}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_show_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 2
runner:
type: "constant"
times: {{smoke or 4}}
concurrency: {{smoke or 2}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_update_networks:
-
args:
network_create_args: {}
network_update_args:
admin_state_up: False
name: "_updated"
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_update_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.4.0.0/16"
subnets_per_network: 2
subnet_update_args:
enable_dhcp: False
name: "_subnet_updated"
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_update_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
router_update_args:
admin_state_up: False
name: "_router_updated"
runner:
type: "constant"
times: {{smoke or 4}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_delete_networks:
-
args:
network_create_args: {}
runner:
type: "constant"
times: {{smoke or 20}}
concurrency: {{smoke or 10}}
context:
users:
tenants: {{smoke or 3}}
users_per_tenant: {{smoke or 2}}
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_delete_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 3}}
users_per_tenant: {{smoke or 2}}
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_delete_floating_ips:
-
args:
floating_network: "public"
floating_ip_args: {}
runner:
type: "constant"
times: {{smoke or 8}}
concurrency: {{smoke or 4}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
floatingip: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_delete_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
runner:
type: "constant"
times: {{smoke or 4}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_delete_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 5
runner:
type: "constant"
times: {{smoke or 4}}
concurrency: {{smoke or 4}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 20
NeutronNetworks.create_and_update_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 2
port_update_args:
admin_state_up: False
device_id: "dummy_id"
device_owner: "dummy_owner"
name: "_port_updated"
runner:
type: "constant"
times: {{smoke or 10}}
concurrency: {{smoke or 5}}
context:
network: {}
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 20
NeutronSubnets.delete_subnets:
-
runner:
type: "constant"
times: {{smoke or 15}}
concurrency: {{smoke or 15}}
context:
users:
tenants: 1
users_per_tenant: {{smoke or 15}}
user_choice_method: "round_robin"
quotas:
neutron:
network: -1
subnet: -1
network:
subnets_per_network: 15
dualstack: True
router: {}
Quotas.neutron_update:
-
args:
max_quota: 1024
runner:
type: "constant"
times: {{smoke or 10}}
concurrency: {{smoke or 2}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
sla:
failure_rate:
max: 0
NovaServers.boot_and_delete_server:
-
args:
auto_assign_nic: True
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
list_kwargs:
visibility: "public"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
network:
start_cidr: "10.2.0.0/24"
networks_per_tenant: 2
dns_nameservers:
- "8.8.8.8"
- "8.8.4.4"
router:
external: false
sla:
failure_rate:
max: 0
VMTasks.boot_runcommand_delete:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
command:
script_file: "~/.rally/extra/instance_test.sh"
interpreter: "/bin/sh"
username: "cirros"
runner:
type: "constant"
times: {{smoke or 2}}
concurrency: {{smoke or 2}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 2}}
network: {}
sla:
failure_rate:
max: 0
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
command:
script_file: "~/.rally/extra/instance_test.sh"
interpreter: "/bin/sh"
username: "cirros"
volume_args:
size: 2
runner:
type: "constant"
times: {{smoke or 2}}
concurrency: {{smoke or 2}}
context:
users:
tenants: {{smoke or 2}}
users_per_tenant: {{smoke or 1}}
network: {}
sla:
failure_rate:
max: 0
-
args:
flavor:
name: {{flavor_name}}
image:
name: {{image_name}}
floating_network: "public"
command:
script_inline: |
time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
file=/tmp/test.img
c=100 #100M
write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
[ -f $file ] && rm $file
echo "{
\"write_seq\": $write_seq,
\"read_seq\": $read_seq
}"
interpreter: "/bin/sh"
username: "cirros"
runner:
type: "constant"
times: 2
concurrency: 2
context:
users:
tenants: 1
users_per_tenant: 1
network: {}
sla:
failure_rate:
max: 0
-
args:
command:
# The `image_command_customizer` context prepares an image and
# executes the `rally-jobs/extra/install_benchmark.sh` script. That script
# creates a new file, "dd_test.sh", inside the image, and this file is
# what the scenario calls. It is not related to the
# VMTasks.dd_load_test scenario.
remote_path: "./dd_test.sh"
flavor:
name: "m1.tiny"
username: "cirros"
runner:
type: "constant"
times: 1
concurrency: 1
context:
image_command_customizer:
command:
local_path: "~/.rally/extra/install_benchmark.sh"
remote_path: "./install_benchmark.sh"
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
username: "cirros"
users:
tenants: 1
users_per_tenant: 1
network:
dns_nameservers: []
VMTasks.dd_load_test:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
floating_network: "public"
force_delete: false
username: "cirros"
runner:
type: "constant"
times: 2
concurrency: 2
context:
users:
tenants: 2
users_per_tenant: 1
network: {}


@@ -1,62 +0,0 @@
---
version: 2
title: Task for gate-rally-dsvm-watcher-rally-ubuntu-xenial-nv job
description: >
This task contains various scenarios for testing watcher plugins
subtasks:
-
title: Watcher.create_audit_and_delete tests
scenario:
Watcher.create_audit_and_delete: {}
runner:
constant:
times: 10
concurrency: 2
contexts:
users:
tenants: 2
users_per_tenant: 2
audit_templates:
audit_templates_per_admin: 5
fill_strategy: "round_robin"
params:
- goal:
name: "dummy"
strategy:
name: "dummy"
-
title: Watcher.create_audit_template_and_delete tests
scenario:
Watcher.create_audit_template_and_delete:
goal:
name: "dummy"
strategy:
name: "dummy"
runner:
constant:
times: 10
concurrency: 2
-
title: Watcher.list_audit_templates tests
scenario:
Watcher.list_audit_templates: {}
runner:
constant:
times: 10
concurrency: 2
contexts:
users:
tenants: 2
users_per_tenant: 2
audit_templates:
audit_templates_per_admin: 5
fill_strategy: "random"
params:
- goal:
name: "workload_balancing"
strategy:
name: "workload_stabilization"
- goal:
name: "dummy"
strategy:
name: "dummy"


@@ -1,757 +0,0 @@
{%- set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %}
---
KeystoneBasic.create_user:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.create_delete_user:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.create_user_set_enabled_and_delete:
-
args:
enabled: true
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
-
args:
enabled: false
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_list_tenants:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.get_entities:
-
runner:
type: "constant"
times: 20
concurrency: 10
sla:
failure_rate:
max: 0
-
args:
service_name: null
runner:
type: "constant"
times: 20
concurrency: 10
sla:
failure_rate:
max: 0
-
args:
service_name: "nova"
runner:
type: "constant"
times: 20
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.add_and_remove_user_role:
-
runner:
type: "constant"
times: 10
concurrency: 5
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_delete_role:
-
runner:
type: "constant"
times: 10
concurrency: 5
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_get_role:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 5
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 0
KeystoneBasic.create_add_and_list_user_roles:
-
runner:
type: "constant"
times: 10
concurrency: 5
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_list_roles:
-
runner:
type: "constant"
times: 10
concurrency: 2
context:
users:
tenants: 3
users_per_tenant: 2
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_list_users:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.create_tenant:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 10
sla:
failure_rate:
max: 0
KeystoneBasic.create_tenant_with_users:
-
args:
users_per_tenant: 10
runner:
type: "constant"
times: 10
concurrency: 10
context:
users:
tenants: 3
sla:
failure_rate:
max: 0
KeystoneBasic.create_user_update_password:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 5
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_update_user:
-
args:
create_user_kwargs: {}
update_user_kwargs:
enabled: False
runner:
type: "constant"
times: 10
concurrency: 2
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 0
KeystoneBasic.create_update_and_delete_tenant:
-
args: {}
runner:
type: "constant"
times: 10
concurrency: 5
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_delete_service:
-
runner:
type: "constant"
times: 10
concurrency: 5
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_list_services:
-
runner:
type: "constant"
times: 10
concurrency: 5
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_list_ec2credentials:
-
runner:
type: "constant"
times: 10
concurrency: 5
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 0
KeystoneBasic.create_and_delete_ec2credential:
-
runner:
type: "constant"
times: 10
concurrency: 5
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 0
Dummy.openstack:
-
args:
sleep: 0.01
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 8
users_per_tenant: 4
sla:
failure_rate:
max: 0
-
args:
sleep: 0.6
runner:
type: "constant"
concurrency: 2
times: 4
timeout: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
-
args:
sleep: 0.6
runner:
type: "rps"
rps: 2
times: 5
timeout: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
-
description: "Check 'quotas' context."
args:
sleep: 0.01
runner:
type: "constant"
times: 1
concurrency: 1
context:
quotas:
nova:
instances: 200
cores: 200
ram: -1
floating_ips: 200
fixed_ips: 200
metadata_items: -1
injected_files: -1
injected_file_content_bytes: -1
injected_file_path_bytes: -1
key_pairs: 500
security_groups: 400
security_group_rules: 600
cinder:
gigabytes: -1
snapshots: -1
volumes: -1
sla:
failure_rate:
max: 0
Authenticate.keystone:
-
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 2
users_per_tenant: 10
sla:
failure_rate:
max: 0
Authenticate.validate_glance:
-
args:
repetitions: 2
runner:
type: "constant"
times: 10
concurrency: 5
context:
users:
tenants: 3
users_per_tenant: 5
sla:
failure_rate:
max: 0
HttpRequests.check_request:
-
args:
url: "http://www.example.com"
method: "GET"
status_code: 200
runner:
type: "constant"
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
HttpRequests.check_random_request:
-
args:
requests:
-
url: "http://www.example.com"
method: "GET"
-
url: "http://localhost"
method: "GET"
status_code: 200
runner:
type: "constant"
times: 2
concurrency: 2
sla:
failure_rate:
max: 0
GlanceImages.list_images:
-
runner:
type: "constant"
times: 5
concurrency: 5
context:
users:
tenants: 1
users_per_tenant: 2
images:
image_url: "{{ cirros_image_url }}"
disk_format: "qcow2"
container_format: "bare"
images_per_tenant: 1
sla:
failure_rate:
max: 100
-
runner:
type: "constant"
times: 5
concurrency: 5
context:
users:
tenants: 2
users_per_tenant: 1
images:
image_url: "~/.rally/extra/fake-image.img"
disk_format: "qcow2"
container_format: "bare"
images_per_tenant: 2
sla:
failure_rate:
max: 100
GlanceImages.create_and_get_image:
-
args:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 4
concurrency: 2
context:
users:
tenants: 2
users_per_tenant: 2
api_versions:
glance:
version: 2
sla:
failure_rate:
max: 100
GlanceImages.create_and_download_image:
-
args:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 4
concurrency: 2
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 0
GlanceImages.create_and_delete_image:
# -
# args:
# image_location: "{{ cirros_image_url }}"
# container_format: "bare"
# disk_format: "qcow2"
# runner:
# type: "constant"
# times: 1
# concurrency: 1
# context:
# users:
# tenants: 2
# users_per_tenant: 3
# api_versions:
# glance:
# version: 1
# sla:
# failure_rate:
# max: 0
#
-
args:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 2
users_per_tenant: 3
api_versions:
glance:
version: 2
sla:
failure_rate:
max: 100
#
# -
# args:
# image_location: "{{ cirros_image_url }}"
# container_format: "bare"
# disk_format: "qcow2"
# runner:
# type: "constant"
# times: 1
# concurrency: 1
# context:
# users:
# tenants: 1
# users_per_tenant: 1
# api_versions:
# glance:
# version: 1
# roles:
# - admin
# sla:
# failure_rate:
# max: 0
GlanceImages.create_and_list_image:
# -
# args:
# image_location: "~/.rally/extra/fake-image.img"
# container_format: "bare"
# disk_format: "qcow2"
# runner:
# type: "constant"
# times: 1
# concurrency: 1
# context:
# users:
# tenants: 1
# users_per_tenant: 1
# api_versions:
# glance:
# version: 1
# sla:
# failure_rate:
# max: 0
#
-
args:
image_location: "~/.rally/extra/fake-image.img"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
api_versions:
glance:
version: 2
sla:
failure_rate:
max: 100
GlanceImages.create_image_and_boot_instances:
-
args:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
flavor:
name: "m1.tiny"
number_instances: 2
create_image_kwargs:
properties:
hw_video_model: vga
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 3
users_per_tenant: 1
sla:
failure_rate:
max: 100
GlanceImages.create_and_update_image:
-
args:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 4
concurrency: 2
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 100
GlanceImages.create_and_deactivate_image:
-
args:
image_location: "{{ cirros_image_url }}"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 4
concurrency: 2
context:
users:
tenants: 2
users_per_tenant: 2
sla:
failure_rate:
max: 100
SwiftObjects.create_container_and_object_then_list_objects:
-
args:
objects_per_container: 2
object_size: 5120
runner:
type: "constant"
times: 2
concurrency: 2
context:
users:
tenants: 1
users_per_tenant: 1
roles:
- "admin"
sla:
failure_rate:
max: 0
SwiftObjects.create_container_and_object_then_delete_all:
-
args:
objects_per_container: 5
object_size: 102400
runner:
type: "constant"
times: 4
concurrency: 2
context:
users:
tenants: 1
users_per_tenant: 1
roles:
- "admin"
sla:
failure_rate:
max: 0
SwiftObjects.create_container_and_object_then_download_object:
-
args:
objects_per_container: 5
object_size: 1024
runner:
type: "constant"
times: 6
concurrency: 3
context:
users:
tenants: 1
users_per_tenant: 1
roles:
- "admin"
sla:
failure_rate:
max: 0
SwiftObjects.list_and_download_objects_in_containers:
-
runner:
type: "constant"
times: 2
concurrency: 2
context:
users:
tenants: 1
users_per_tenant: 1
roles:
- "admin"
swift_objects:
containers_per_tenant: 1
objects_per_container: 5
object_size: 10240
sla:
failure_rate:
max: 0
SwiftObjects.list_objects_in_containers:
-
runner:
type: "constant"
times: 6
concurrency: 3
context:
users:
tenants: 1
users_per_tenant: 1
roles:
- "admin"
swift_objects:
containers_per_tenant: 1
objects_per_container: 10
object_size: 1024
sla:
failure_rate:
max: 0


@@ -1,3 +0,0 @@
---
image_name: "^cirros.*-disk$"


@@ -13,13 +13,10 @@
# under the License.
import os
import shutil
import tempfile
from unittest import mock
import ddt
import rally
from rally.common.plugin import plugin
from rally.common import validation
from rally.plugins.common import validators
@@ -335,34 +332,12 @@ class RequiredParamOrContextValidatorTestCase(test.TestCase):
class FileExistsValidatorTestCase(test.TestCase):
rally_jobs_path = os.path.join(
os.path.dirname(rally.__file__), "..", "rally-jobs")
def setUp(self):
super(FileExistsValidatorTestCase, self).setUp()
self.validator = validators.FileExistsValidator(param_name="p",
required=False)
self.credentials = dict(openstack={"admin": mock.MagicMock(),
"users": [mock.MagicMock()], })
self.tmp_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tmp_dir, ".rally"))
shutil.copytree(os.path.join(self.rally_jobs_path, "extra"),
os.path.join(self.tmp_dir, ".rally", "extra"))
self.original_home = os.environ["HOME"]
os.environ["HOME"] = self.tmp_dir
def return_home():
os.environ["HOME"] = self.original_home
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.addCleanup(return_home)
@mock.patch("rally.plugins.common.validators."
"FileExistsValidator._file_access_ok")
def test_file_exists(self, mock__file_access_ok):
self.validator.validate(self.credentials, {"args": {"p": "test_file"}},
None, None)
validator = validators.FileExistsValidator("p", required=False)
validator.validate({}, {"args": {"p": "test_file"}}, None, None)
mock__file_access_ok.assert_called_once_with(
"test_file", os.R_OK, "p", False)