Add rally-scenarios test to plugin repo

Change-Id: Id229171be113bfd6c952e16404d082443517305d
Artem Savinov 2016-06-24 16:10:26 +03:00
parent e8bfab8631
commit 0bc425cd31
62 changed files with 1522 additions and 0 deletions
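The task files added below are Jinja2 templates: variables such as compute, concurrency, gre_enabled, vlan_amount, floating_ip_amount, current_path and floating_net are substituted before Rally parses the YAML (typically via task arguments passed to rally task start). A minimal sketch of that rendering step, assuming python-jinja2 and PyYAML are installed; the file name and every numeric value are illustrative assumptions, not taken from the repo:

# Sketch: render one of the scenario templates with assumed task arguments.
import jinja2
import yaml

task_args = {
    "compute": 2,                 # number of compute nodes (assumed)
    "concurrency": 4,             # base runner concurrency (assumed)
    "gre_enabled": False,
    "vlan_amount": 30,
    "floating_ip_amount": 50,
    "current_path": ".",
    "floating_net": "admin_floating_net",
}

with open("nova-boot-and-delete-server.yaml") as f:        # hypothetical path
    rendered = jinja2.Template(f.read()).render(**task_args)

task = yaml.safe_load(rendered)
runner = task["NovaServers.boot_and_delete_server"][0]["runner"]
print(runner["times"], runner["concurrency"])              # -> 40 16

With the values above, times: {{ compute * 20 }} renders to 40 and concurrency: {{ concurrency * 4 }} to 16, which is exactly the YAML Rally would execute.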

@@ -0,0 +1,33 @@
---
CinderVolumes.create_and_attach_volume:
-
args:
size: 10
image:
name: "TestVM-VMDK"
flavor:
name: "m1.nano"
create_vm_params:
availability_zone: "vcenter"
create_volume_params:
availability_zone: "vcenter-cinder"
runner:
type: "constant"
times: {{ compute * 9 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
quotas:
cinder:
volumes: -1
gigabytes: -1
snapshots: -1
api_versions:
cinder:
version: 2
service_name: cinderv2
sla:
failure_rate:
max: 0

@@ -0,0 +1,27 @@
---
CinderVolumes.create_and_extend_volume:
-
args:
size: 1
new_size: 2
availability_zone: "vcenter-cinder"
runner:
type: "constant"
times: {{ compute * 14 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
quotas:
cinder:
volumes: -1
gigabytes: -1
snapshots: -1
api_versions:
cinder:
version: 2
service_name: cinderv2
sla:
failure_rate:
max: 0

@@ -0,0 +1,54 @@
---
CinderVolumes.create_and_list_volume:
-
args:
size: 1
detailed: True
availability_zone: "vcenter-cinder"
runner:
type: "constant"
times: {{ compute * 14 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
quotas:
cinder:
volumes: -1
gigabytes: -1
snapshots: -1
api_versions:
cinder:
version: 2
service_name: cinderv2
sla:
failure_rate:
max: 0
-
args:
size: 1
detailed: True
image:
name: "TestVM-VMDK"
availability_zone: "vcenter-cinder"
runner:
type: "constant"
times: {{ compute * 14 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
quotas:
cinder:
volumes: -1
gigabytes: -1
snapshots: -1
api_versions:
cinder:
version: 2
service_name: cinderv2
sla:
failure_rate:
max: 0

@@ -0,0 +1,18 @@
---
GlanceImages.create_and_delete_image:
-
args:
image_location: "http://172.18.170.69:5000/distr/nsx/tcl.vmdk"
container_format: "bare"
disk_format: "vmdk"
runner:
type: "constant"
times: {{ compute * 6 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,18 @@
---
GlanceImages.create_and_list_image:
-
args:
image_location: "http://172.18.170.69:5000/distr/nsx/tcl.vmdk"
container_format: "bare"
disk_format: "vmdk"
runner:
type: "constant"
times: {{ compute * 6 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,19 @@
---
GlanceImages.list_images:
-
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ compute * 2 }}
users_per_tenant: 3
images:
image_url: "http://172.18.170.69:5000/distr/nsx/tcl.vmdk"
image_type: "vmdk"
image_container: "bare"
images_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,16 @@
---
HeatStacks.create_and_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/default.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,24 @@
---
HeatStacks.create_and_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/server_with_ports.yml.template"
parameters:
public_net: "{{ floating_net }}"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
quotas:
neutron:
port: -1
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,21 @@
---
HeatStacks.create_and_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/server_with_volume.yml.template"
runner:
timeout: 90
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
api_versions:
cinder:
version: 2
service_name: cinderv2
sla:
failure_rate:
max: 0

@@ -0,0 +1,16 @@
---
HeatStacks.create_and_list_stack:
-
args:
template_path: "{{ current_path }}/templates/default.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 4
users_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,16 @@
---
HeatStacks.create_check_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/random_strings.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,16 @@
---
HeatStacks.create_suspend_resume_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/random_strings.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 3
users_per_tenant: 2
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.create_update_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/random_strings.yml.template"
updated_template_path: "{{ current_path }}/templates/updated_random_strings_add.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.create_update_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/random_strings.yml.template"
updated_template_path: "{{ current_path }}/templates/updated_random_strings_delete.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.create_update_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/resource_group.yml.template"
updated_template_path: "{{ current_path }}/templates/updated_resource_group_increase.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.create_update_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/autoscaling_policy.yml.template"
updated_template_path: "{{ current_path }}/templates/updated_autoscaling_policy_inplace.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.create_update_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/resource_group.yml.template"
updated_template_path: "{{ current_path }}/templates/updated_resource_group_reduce.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.create_update_delete_stack:
-
args:
template_path: "{{ current_path }}/templates/random_strings.yml.template"
updated_template_path: "{{ current_path }}/templates/updated_random_strings_replace.yml.template"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
---
HeatStacks.list_stacks_and_resources:
-
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 3
stacks:
stacks_per_tenant: 2
resources_per_stack: 10
sla:
failure_rate:
max: 0

@@ -0,0 +1,17 @@
heat_template_version: 2013-05-23
resources:
test_group:
type: OS::Heat::AutoScalingGroup
properties:
desired_capacity: 0
max_size: 0
min_size: 0
resource:
type: OS::Heat::RandomString
test_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: test_group }
scaling_adjustment: 1

@@ -0,0 +1 @@
heat_template_version: 2014-10-16

@@ -0,0 +1,13 @@
heat_template_version: 2014-10-16
description: Test template for rally create-update-delete scenario
resources:
test_string_one:
type: OS::Heat::RandomString
properties:
length: 20
test_string_two:
type: OS::Heat::RandomString
properties:
length: 20

@@ -0,0 +1,13 @@
heat_template_version: 2014-10-16
description: Test template for rally create-update-delete scenario
resources:
test_group:
type: OS::Heat::ResourceGroup
properties:
count: 2
resource_def:
type: OS::Heat::RandomString
properties:
length: 20

@@ -0,0 +1,64 @@
heat_template_version: 2013-05-23
parameters:
# set all correct defaults for parameters before launching the test
public_net:
type: string
image:
type: string
default: TestVM-VMDK
flavor:
type: string
default: m1.nano
cidr:
type: string
default: 11.11.11.0/24
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
availability_zone: vcenter
networks:
- port: { get_resource: server_port }
router:
type: OS::Neutron::Router
properties:
external_gateway_info:
network: {get_param: public_net}
router_interface:
type: OS::Neutron::RouterInterface
properties:
router_id: { get_resource: router }
subnet_id: { get_resource: private_subnet }
private_net:
type: OS::Neutron::Net
private_subnet:
type: OS::Neutron::Subnet
properties:
network_id: { get_resource: private_net }
cidr: {get_param: cidr}
port_security_group:
type: OS::Neutron::SecurityGroup
properties:
name: default_port_security_group
description: >
Default security group assigned to the port. The neutron default group is
not used because neutron creates several groups with the same name=default
and nova cannot choose which one it should use.
server_port:
type: OS::Neutron::Port
properties:
network_id: {get_resource: private_net}
fixed_ips:
- subnet: { get_resource: private_subnet }
security_groups:
- { get_resource: port_security_group }

@@ -0,0 +1,40 @@
heat_template_version: 2013-05-23
parameters:
# set all correct defaults for parameters before launching the test
image:
type: string
default: TestVM-VMDK
flavor:
type: string
default: m1.nano
availability_zone:
type: string
description: The availability zone in which to launch the instance.
default: vcenter-cinder
volume_size:
type: number
description: Size of the volume to be created.
default: 1
constraints:
- range: { min: 1, max: 1024 }
description: must be between 1 and 1024 GB.
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
availability_zone: vcenter
cinder_volume:
type: OS::Cinder::Volume
properties:
size: { get_param: volume_size }
availability_zone: { get_param: availability_zone }
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: { get_resource: cinder_volume }
instance_uuid: { get_resource: server }
mountpoint: /dev/vdc

@@ -0,0 +1,23 @@
heat_template_version: 2013-05-23
description: >
Test template for create-update-delete-stack scenario in rally.
The template updates resource parameters without resource re-creation (replacement)
in the stack defined by autoscaling_policy.yaml.template. It allows measuring the
performance of a "pure" resource update operation only.
resources:
test_group:
type: OS::Heat::AutoScalingGroup
properties:
desired_capacity: 0
max_size: 0
min_size: 0
resource:
type: OS::Heat::RandomString
test_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: test_group }
scaling_adjustment: -1

@@ -0,0 +1,19 @@
heat_template_version: 2014-10-16
description: >
Test template for create-update-delete-stack scenario in rally.
The template updates the stack defined by random_strings.yaml.template with an additional resource.
resources:
test_string_one:
type: OS::Heat::RandomString
properties:
length: 20
test_string_two:
type: OS::Heat::RandomString
properties:
length: 20
test_string_three:
type: OS::Heat::RandomString
properties:
length: 20

@@ -0,0 +1,11 @@
heat_template_version: 2014-10-16
description: >
Test template for create-update-delete-stack scenario in rally.
The template deletes one resource from the stack defined by random_strings.yaml.template.
resources:
test_string_one:
type: OS::Heat::RandomString
properties:
length: 20

@@ -0,0 +1,19 @@
heat_template_version: 2014-10-16
description: >
Test template for create-update-delete-stack scenario in rally.
The template deletes one resource from the stack defined by
random_strings.yaml.template and re-creates it with the updated parameters
(so-called update-replace). That happens because some parameters cannot be
changed without resource re-creation. The template allows measuring the
performance of the update-replace operation.
resources:
test_string_one:
type: OS::Heat::RandomString
properties:
length: 20
test_string_two:
type: OS::Heat::RandomString
properties:
length: 40

@@ -0,0 +1,16 @@
heat_template_version: 2014-10-16
description: >
Test template for create-update-delete-stack scenario in rally.
The template updates one resource from the stack defined by resource_group.yaml.template
and adds child resources to that resource.
resources:
test_group:
type: OS::Heat::ResourceGroup
properties:
count: 3
resource_def:
type: OS::Heat::RandomString
properties:
length: 20

@@ -0,0 +1,16 @@
heat_template_version: 2014-10-16
description: >
Test template for create-update-delete-stack scenario in rally.
The template updates one resource from the stack defined by resource_group.yaml.template
and deletes child resources from that resource.
resources:
test_group:
type: OS::Heat::ResourceGroup
properties:
count: 1
resource_def:
type: OS::Heat::RandomString
properties:
length: 20

@@ -0,0 +1,19 @@
---
NeutronNetworks.create_and_delete_networks:
-
args:
network_create_args: {}
runner:
type: "constant"
times: {{ vlan_amount if (not gre_enabled and compute * 8 > vlan_amount) else compute * 8 }}
concurrency: {{ concurrency * 2 }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 0
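The times expression in this and the following Neutron scenarios caps the load by the VLAN range when GRE segmentation is disabled: it evaluates to compute * N unless that exceeds vlan_amount, in which case vlan_amount (or vlan_amount - 10 as a safety margin in some scenarios) is used instead. A small Python equivalent, with assumed input values, just to make the branch explicit:

# Python equivalent of the Jinja "times" expression above; inputs are assumed.
def iterations(compute, vlan_amount, gre_enabled, factor=8):
    if not gre_enabled and compute * factor > vlan_amount:
        return vlan_amount          # VLAN segmentation: capped by the VLAN range
    return compute * factor         # GRE/tunnelling: scales with compute nodes

print(iterations(compute=10, vlan_amount=30, gre_enabled=False))  # -> 30
print(iterations(compute=10, vlan_amount=30, gre_enabled=True))   # -> 80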

@@ -0,0 +1,22 @@
---
NeutronNetworks.create_and_delete_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 50
runner:
type: "constant"
times: {{ vlan_amount if (not gre_enabled and compute > vlan_amount) else compute }}
concurrency: {{ concurrency * 2 }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,26 @@
---
NeutronNetworks.create_and_delete_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 2 > vlan_amount) else compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
port: -1
router: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,24 @@
---
NeutronSecGroupPlugin.create_and_delete_secgroups:
-
args:
security_group_count: 10
rules_per_security_group: 25
runner:
type: "constant"
times: {{ vlan_amount if (not gre_enabled and compute * 10 > vlan_amount) else compute * 10 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 10
users_per_tenant: 2
quotas:
nova:
security_groups: -1
security_group_rules: -1
neutron:
security_group: -1
security_group_rule: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,23 @@
---
NeutronNetworks.create_and_delete_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 5
runner:
type: "constant"
times: {{ vlan_amount if (not gre_enabled and compute * 4 > vlan_amount) else compute * 4 }}
concurrency: {{ concurrency * 2 }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,19 @@
---
NeutronNetworks.create_and_list_networks:
-
args:
network_create_args: {}
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 10 > vlan_amount) else compute * 10 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,22 @@
---
NeutronNetworks.create_and_list_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 10
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 2 > vlan_amount) else compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,25 @@
---
NeutronNetworks.create_and_list_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 5
router_create_args: {}
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 8 > vlan_amount) else compute * 8 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,24 @@
---
NeutronSecGroupPlugin.create_and_list_secgroups:
-
args:
security_group_count: 10
rules_per_security_group: 10
runner:
type: "constant"
times: 1
concurrency: {{ concurrency }}
context:
users:
tenants: 10
users_per_tenant: 2
quotas:
nova:
security_groups: -1
security_group_rules: -1
neutron:
security_group: -1
security_group_rule: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,23 @@
---
NeutronNetworks.create_and_list_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 10
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 5 > vlan_amount) else compute * 5 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,27 @@
---
NeutronNetworks.create_and_update_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 5
port_update_args:
admin_state_up: False
device_id: "dummy_id"
device_owner: "dummy_owner"
name: "_port_updated"
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 2 > vlan_amount) else compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,30 @@
---
NeutronNetworks.create_and_update_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
router_update_args:
admin_state_up: False
name: "_router_updated"
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 2 > vlan_amount) else compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
router: -1
port: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,26 @@
---
NeutronNetworks.create_and_update_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.4.0.0/16"
subnets_per_network: 10
subnet_update_args:
enable_dhcp: False
name: "_subnet_updated"
runner:
type: "constant"
times: {{ vlan_amount - 10 if (not gre_enabled and compute * 2 > vlan_amount) else compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,16 @@
---
Quotas.neutron_update:
-
args:
max_quota: 1024
runner:
type: "constant"
times: {{ compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 20
users_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,43 @@
---
NovaServers.boot_and_bounce_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
actions:
-
hard_reboot: 1
-
soft_reboot: 1
-
stop_start: 1
-
rescue_unrescue: 1
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/25" if gre_enabled else "1.0.0.0/25" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,34 @@
---
NovaServers.boot_and_delete_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
runner:
type: "constant"
times: {{ compute * 20 }}
concurrency: {{ concurrency * 4 }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/21" if gre_enabled else "1.0.0.0/21" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,35 @@
---
NovaKeypair.boot_and_delete_server_with_keypair:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
key_pairs: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/25" if gre_enabled else "1.0.0.0/25" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,38 @@
---
NeutronSecGroupPlugin.boot_and_delete_server_with_secgroups:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
security_group_count: 10
rules_per_security_group: 10
runner:
type: "constant"
times: {{ 10 * compute }}
concurrency: {{ concurrency * 4 }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
security_group: -1
security_group_rule: -1
network:
start_cidr: {{ "100.1.0.0/22" if gre_enabled else "1.0.0.0/22" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,38 @@
---
NovaSecGroup.boot_and_delete_server_with_secgroups:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
security_group_count: 10
rules_per_security_group: 10
runner:
type: "constant"
times: {{ 10 * compute }}
concurrency: {{ concurrency * 4 }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
security_group: -1
security_group_rule: -1
network:
start_cidr: {{ "100.1.0.0/22" if gre_enabled else "1.0.0.0/22" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,35 @@
---
NovaServers.boot_and_list_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
detailed: True
runner:
type: "constant"
times: {{ compute * 20 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ [floating_ip_amount - 5, vlan_amount, compute * 20] | sort | first }}
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/21" if gre_enabled else "1.0.0.0/21" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0
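The tenants expression above builds a list of three candidates (floating_ip_amount - 5, vlan_amount, compute * 20), sorts it and takes the first element, i.e. it picks the smallest value so the tenant count never exceeds the floating IP pool or the VLAN range. In Python terms, with values assumed purely for illustration:

# Equivalent of the "tenants" expression above; the three inputs are assumed.
floating_ip_amount, vlan_amount, compute = 50, 30, 2
candidates = [floating_ip_amount - 5, vlan_amount, compute * 20]   # [45, 30, 40]
print(sorted(candidates)[0])   # -> 30, i.e. the same as min(candidates)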

@@ -0,0 +1,36 @@
---
NovaServers.boot_and_list_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
detailed: True
min_count: {{ compute }}
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/25" if gre_enabled else "1.0.0.0/25" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,36 @@
---
NovaServers.boot_and_list_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
detailed: True
min_count: 10
runner:
type: "constant"
times: {{ compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ [floating_ip_amount - 5, vlan_amount, compute * 20] | sort | first }}
users_per_tenant: 3
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/24" if gre_enabled else "1.0.0.0/24" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,36 @@
---
NovaServers.boot_and_rebuild_server:
-
args:
flavor:
name: "m1.nano"
from_image:
name: "TestVM-VMDK"
to_image:
name: "TestVM-VMDK"
runner:
type: constant
times: {{ compute * 2 }}
concurrency: {{ concurrency }}
context:
users:
tenants: {{ [floating_ip_amount - 10, vlan_amount, compute * 20] | sort | first }}
users_per_tenant: 3
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/21" if gre_enabled else "1.0.0.0/21" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,23 @@
---
NovaKeypair.create_and_delete_keypair:
-
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
key_pairs: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,24 @@
---
NovaSecGroup.create_and_delete_secgroups:
-
args:
security_group_count: 10
rules_per_security_group: 25
runner:
type: "constant"
times: {{ 10 * compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 10
users_per_tenant: 2
quotas:
nova:
security_groups: -1
security_group_rules: -1
neutron:
security_group: -1
security_group_rule: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,23 @@
---
NovaKeypair.create_and_list_keypairs:
-
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
key_pairs: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,24 @@
---
NovaSecGroup.create_and_list_secgroups:
-
args:
security_group_count: 10
rules_per_security_group: 10
runner:
type: "constant"
times: {{ 10 * compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 10
users_per_tenant: 2
quotas:
nova:
security_groups: -1
security_group_rules: -1
neutron:
security_group: -1
security_group_rule: -1
sla:
failure_rate:
max: 0

@@ -0,0 +1,16 @@
---
Quotas.nova_update_and_delete:
-
args:
max_quota: 1024
runner:
type: "constant"
times: {{ compute }}
concurrency: 1
context:
users:
tenants: 3
users_per_tenant: 2
sla:
failure_rate:
max: 0

@@ -0,0 +1,37 @@
---
NovaServers.resize_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
to_flavor:
name: "m1.small"
confirm: true
runner:
type: "constant"
times: {{ compute * 20 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/21" if gre_enabled else "1.0.0.0/21" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,34 @@
---
NovaServers.snapshot_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
runner:
type: "constant"
times: {{ compute * 12 }}
concurrency: {{ concurrency }}
context:
users:
tenants: 2
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/22" if gre_enabled else "1.0.0.0/22" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0

@@ -0,0 +1,35 @@
---
NovaServers.suspend_and_resume_server:
-
args:
flavor:
name: "m1.nano"
image:
name: "TestVM-VMDK"
force_delete: false
runner:
type: "constant"
times: {{ compute }}
concurrency: {{ concurrency }}
context:
users:
tenants: 3
users_per_tenant: 2
quotas:
nova:
instances: -1
cores: -1
ram: -1
floating_ips: -1
security_groups: -1
security_group_rules: -1
neutron:
network: -1
subnet: -1
port: -1
network:
start_cidr: {{ "100.1.0.0/21" if gre_enabled else "1.0.0.0/21" }}
networks_per_tenant: 1
sla:
failure_rate:
max: 0