Migrate Zuul config from legacy v2 to native v3

1) Move the main tempest job to ec2api-tempest-plugin.
2) Temporarily remove the full tempest job (it will be moved to ec2api-tempest).
3) Remove the rally job entirely; it has not worked for years.

Change-Id: I44f609f1ff08acc770943108c3ec99c887561654
Alexander Slipenchuk 2020-08-06 15:43:30 +03:00
parent 9ff4975fcd
commit da91dadfc2
16 changed files with 2 additions and 1181 deletions
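For orientation, the project stanza that remains in the project's Zuul configuration after this change looks roughly like the sketch below. It is reconstructed only from the lines visible in the diff that follows; templates and experimental jobs that the hunk context elides are simply omitted here, not guessed.

- project:
    templates:
      - check-requirements
      - publish-openstack-docs-pti
    check:
      jobs:
        - ec2api-tempest-plugin-functional
    gate:
      queue: ec2-api
      jobs:
        - ec2api-tempest-plugin-functional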

View File

@@ -1,41 +1,3 @@
- job:
    name: ec2-api-functional-neutron
    parent: legacy-dsvm-base
    run: playbooks/legacy/ec2-api-functional-neutron/run.yaml
    post-run: playbooks/legacy/ec2-api-functional-neutron/post.yaml
    timeout: 7800
    required-projects:
      - openstack/devstack-gate
      - openstack/ec2-api
      - openstack/neutron
      - openstack/neutron-tempest-plugin
      - openstack/ec2api-tempest-plugin

- job:
    name: ec2-api-functional-neutron-full
    parent: legacy-dsvm-base
    run: playbooks/legacy/ec2-api-functional-neutron-full/run.yaml
    post-run: playbooks/legacy/ec2-api-functional-neutron-full/post.yaml
    timeout: 10800
    required-projects:
      - openstack/devstack-gate
      - openstack/ec2-api
      - openstack/neutron
      - x/swift3
      - openstack/neutron-tempest-plugin
      - openstack/ec2api-tempest-plugin

- job:
    name: ec2-api-rally-fakevirt
    parent: legacy-dsvm-base
    run: playbooks/legacy/ec2-api-rally-fakevirt/run.yaml
    post-run: playbooks/legacy/ec2-api-rally-fakevirt/post.yaml
    timeout: 7800
    required-projects:
      - openstack/devstack-gate
      - openstack/ec2-api
      - openstack/rally

- project:
    templates:
      - check-requirements
@@ -44,15 +6,13 @@
      - publish-openstack-docs-pti
    check:
      jobs:
        - ec2-api-functional-neutron
        - ec2api-tempest-plugin-functional
    gate:
      queue: ec2-api
      jobs:
        - ec2-api-functional-neutron
        - ec2api-tempest-plugin-functional
    experimental:
      jobs:
        - ec2-api-functional-neutron-full
        - ec2-api-rally-fakevirt
        # TripleO jobs that deploy ec2-api.
        # Note we don't use a project-template here, so it's easier
        # to disable voting on one specific job if things go wrong.
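The replacement functional job itself is defined in the ec2api-tempest-plugin repository and is not part of this diff. Purely as an illustration of what a native v3 definition of such a job usually looks like; every detail below is an assumption except the job name (referenced in the project stanza above) and the test regex (which appears in the legacy playbooks being removed):

- job:
    name: ec2api-tempest-plugin-functional
    parent: devstack-tempest
    required-projects:
      - openstack/ec2-api
      - openstack/ec2api-tempest-plugin
    vars:
      devstack_plugins:
        ec2-api: https://opendev.org/openstack/ec2-api
      tempest_plugins:
        - ec2api-tempest-plugin
      tempest_test_regex: ec2api_tempest_plugin
      tox_envlist: all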

View File

@@ -1,15 +0,0 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

View File

@@ -1,63 +0,0 @@
- hosts: all
name: Autoconverted job legacy-ec2-api-functional-neutron-dsvm-full from old job
ec2-api-functional-neutron-dsvm-full-ubuntu-xenial-nv
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
enable_plugin ec2-api https://opendev.org/openstack/ec2-api
enable_plugin neutron-tempest-plugin https://opendev.org/openstack/neutron-tempest-plugin
TEMPEST_PLUGINS='/opt/stack/new/ec2api-tempest-plugin'
RUN_LONG_TESTS=1
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export TEMPEST_CONCURRENCY=2
export DEVSTACK_GATE_NEUTRON=1
export PROJECTS="openstack/ec2-api $PROJECTS"
export PROJECTS="x/swift3 $PROJECTS"
export PROJECTS="openstack/neutron-tempest-plugin $PROJECTS"
export ENABLED_SERVICES+=,swift3
export PROJECTS="openstack/ec2api-tempest-plugin $PROJECTS"
export DEVSTACK_GATE_TEMPEST_REGEX="ec2api_tempest_plugin"
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'

View File

@@ -1,15 +0,0 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

View File

@@ -1,68 +0,0 @@
- hosts: all
name: Autoconverted job legacy-functional-neutron-dsvm-ec2api from old job gate-functional-neutron-dsvm-ec2api-ubuntu-xenial
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
enable_plugin ec2-api https://opendev.org/openstack/ec2-api
enable_plugin neutron-tempest-plugin https://opendev.org/openstack/neutron-tempest-plugin
TEMPEST_PLUGINS='/opt/stack/new/ec2api-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export TEMPEST_CONCURRENCY=2
export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
export PROJECTS="openstack/ec2-api $PROJECTS"
export PROJECTS="openstack/neutron-tempest-plugin $PROJECTS"
if [ "neutron" = "neutron" ] ; then
export DEVSTACK_GATE_NEUTRON=1
else
export DEVSTACK_GATE_NEUTRON=0
fi
export PROJECTS="openstack/ec2api-tempest-plugin $PROJECTS"
export DEVSTACK_GATE_TEMPEST_REGEX="ec2api_tempest_plugin"
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'

View File

@@ -1,41 +0,0 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/rally-plot/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/rally-plot/extra/index.html
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

View File

@@ -1,66 +0,0 @@
- hosts: all
name: Autoconverted job legacy-ec2-api-rally-dsvm-fakevirt from old job ec2-api-rally-dsvm-fakevirt-ubuntu-xenial-nv
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
enable_plugin ec2-api https://opendev.org/openstack/ec2-api
enable_plugin rally https://opendev.org/openstack/rally
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export PROJECTS="openstack/ec2-api $PROJECTS"
export PROJECTS="openstack/rally $PROJECTS"
export DEVSTACK_GATE_VIRT_DRIVER=fake
export DEVSTACK_GATE_NEUTRON=1
export DEVSTACK_GATE_TEMPEST_NOTESTS=1
export RALLY_SCENARIO=ec2-api-fakevirt
export CEILOMETER_NOTIFICATION_TOPICS=notifications,profiler
function post_test_hook {
# run needed preparation steps for third-party project
if [ -f $BASE/new/ec2-api/rally-scenarios/post_test_hook.sh ] ; then
$BASE/new/ec2-api/rally-scenarios/post_test_hook.sh
fi
$BASE/new/rally/tests/ci/rally-gate.sh
}
export -f post_test_hook
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'

View File

@@ -1,5 +0,0 @@
This directory contains rally benchmark scenarios to be run by OpenStack CI.
* more about rally: https://rally.readthedocs.io/en/latest/
* how to add rally-gates: https://rally.readthedocs.io/en/latest/quick_start/gates.html

View File

@@ -1,84 +0,0 @@
---
EC2APIPlugin.describe_all_in_one:
-
runner:
type: "constant"
times: 16
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
prepare_ec2_client:
ec2api_servers:
flavor: "m1.ec2api-alt"
image: "*cirros*"
servers_per_tenant: 100
run_in_vpc: False
assign_floating_ip: False
build_timeout: 150
-
runner:
type: "constant"
times: 16
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
prepare_ec2_client:
ec2api_servers:
flavor: "m1.ec2api-alt"
image: "*cirros*"
servers_per_tenant: 100
servers_per_run: 4
run_in_vpc: True
assign_floating_ip: True
build_timeout: 150
# This context creates objects very long.
# EC2APIPlugin.describe_networks:
# -
# runner:
# type: "constant"
# times: 20
# concurrency: 1
# context:
# users:
# tenants: 1
# users_per_tenant: 1
# prepare_ec2_client:
# ec2api_networks:
# subnets_per_tenant: 50
# nis_per_subnet: 10
EC2APIPlugin.describe_regions:
-
runner:
type: "constant"
times: 100
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
prepare_ec2_client:
EC2APIPlugin.describe_images:
-
runner:
type: "constant"
times: 10
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
prepare_ec2_client:
fake_images:
disk_format: "ami"
container_format: "ami"
images_per_tenant: 2000

View File

@@ -1,5 +0,0 @@
Extra files
===========
All files from this directory are copied to the gate nodes, so you can reference
them by absolute path in Rally tasks. The files end up in ~/.rally/extra/*
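A hedged illustration of how a task can point at one of these files (the scenario name and the script file name are hypothetical, not taken from this repository):

SomeScenario.run_with_script:
  -
    args:
      # hypothetical file shipped from this extra/ directory
      script: "~/.rally/extra/prepare.sh"
    runner:
      type: "constant"
      times: 1
      concurrency: 1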

View File

@@ -1,9 +0,0 @@
Rally plugins
=============
All *.py modules in this directory are auto-loaded by Rally, and all plugins
defined in them become discoverable. No extra configuration is needed, and
there is no difference between writing them here or in the Rally code base.
Note that it is better to push all interesting and useful benchmarks to the
Rally code base, as this simplifies administration for operators.

View File

@@ -1,88 +0,0 @@
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally.task import context
from rally_openstack import consts
from rally_openstack import osclients
from rally_openstack.wrappers import network as network_wrapper
LOG = logging.getLogger(__name__)
@context.configure(name="prepare_ec2_client", platform="openstack", order=110)
class PrepareEC2ClientContext(context.Context):
def __init__(self, ctx):
super(PrepareEC2ClientContext, self).__init__(ctx)
self.net_wrapper = network_wrapper.wrap(
osclients.Clients(self.context["admin"]["credential"]),
self, config=self.config)
self.net_wrapper.start_cidr = '10.0.0.0/16'
@logging.log_task_wrapper(LOG.info, _("Enter context: `EC2 creds`"))
def setup(self):
"""This method is called before the task start."""
try:
for user in self.context['users']:
clients = osclients.Clients(user['credential'])
keystone = clients.keystone
creds = keystone().ec2.list(user['id'])
if not creds:
creds = keystone().ec2.create(user['id'],
user['tenant_id'])
else:
creds = creds[0]
url = keystone.service_catalog.url_for(service_type='ec2')
user['ec2args'] = {
'region': 'RegionOne',
'url': url,
'access': creds.access,
'secret': creds.secret
}
if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
body = {"quota": {"router": -1, "floatingip": -1}}
self.net_wrapper.client.update_quota(tenant_id, body)
network = self.net_wrapper.create_network(
tenant_id, add_router=True, subnets_num=1)
self.context["tenants"][tenant_id]["network"] = network
except Exception as e:
msg = "Can't prepare ec2 client: %s" % e.message
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)
@logging.log_task_wrapper(LOG.info, _("Exit context: `EC2 creds`"))
def cleanup(self):
try:
if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
network = self.context["tenants"][tenant_id]["network"]
self.net_wrapper.delete_network(network)
except Exception as e:
msg = "Can't cleanup ec2 client: %s" % e.message
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)

View File

@@ -1,363 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import range
import time
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally.task import context
from ec2api.tests import botocoreclient
LOG = logging.getLogger(__name__)
class EC2Objects(context.Context):
CIDR = "10.0.0.0/16"
AWS_ZONE = "nova"
def run_instances(self, tenant_id, client, image_id):
flavor = self.config["flavor"]
servers_per_tenant = self.config["servers_per_tenant"]
LOG.info("Calling run_instance with image_id=%s "
"flavor=%s servers_per_tenant=%s"
% (image_id, flavor, servers_per_tenant))
servers_per_run = self.config["servers_per_run"]
while servers_per_tenant > 0:
if servers_per_tenant < servers_per_run:
servers_per_run = servers_per_tenant
kwargs = {"ImageId": image_id, "InstanceType": flavor,
"MinCount": servers_per_run, "MaxCount": servers_per_run}
if self.config.get("run_in_vpc", False):
subnet_id = self.prepare_network(tenant_id, client)
kwargs["SubnetId"] = subnet_id
data = client.run_instances(*[], **kwargs)
ids = [s['InstanceId'] for s in data['Instances']]
self.context["tenants"][tenant_id]["servers"] += ids
servers_per_tenant -= servers_per_run
def wait_for_instances(self, tenant_id, client):
LOG.info("waiting for running state")
ids = self.context["tenants"][tenant_id]["servers"]
start_time = time.time()
while True:
data = client.describe_instances(InstanceIds=ids)
for instance in data['Reservations'][0]['Instances']:
assert 'error' != instance['State']['Name']
if instance['State']['Name'] != 'running':
break
else:
break
time.sleep(5)
dtime = time.time() - start_time
assert dtime <= self.config["build_timeout"]
LOG.info("end of waiting")
def prepare_network(self, tenant_id, client, ni_count=0):
result = dict()
self.context["tenants"][tenant_id]["networks"].append(result)
data = client.create_vpc(CidrBlock=self.CIDR)
vpc_id = data['Vpc']['VpcId']
result["vpc_id"] = vpc_id
data = client.create_subnet(VpcId=vpc_id,
CidrBlock=self.CIDR, AvailabilityZone=self.AWS_ZONE)
subnet_id = data['Subnet']['SubnetId']
result["subnet_id"] = subnet_id
result["ni_ids"] = list()
for dummy in range(0, ni_count):
data = client.create_network_interface(SubnetId=subnet_id)
ni_id = data['NetworkInterface']['NetworkInterfaceId']
result["ni_ids"].append(ni_id)
time.sleep(1)
if self.config.get('assign_floating_ip', False):
data = client.create_internet_gateway()
gw_id = data['InternetGateway']['InternetGatewayId']
result["gw_id"] = gw_id
data = client.attach_internet_gateway(VpcId=vpc_id,
InternetGatewayId=gw_id)
data = client.describe_route_tables(
Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
# len(data['RouteTables']) should be 1
route_table_id = data['RouteTables'][0]['RouteTableId']
kwargs = {
'DestinationCidrBlock': '0.0.0.0/0',
'RouteTableId': route_table_id,
'GatewayId': gw_id
}
client.create_route(*[], **kwargs)
return subnet_id
def assign_floating_ips(self, tenant_id, client):
self.context["tenants"][tenant_id]["addresses"] = list()
if not self.config.get('assign_floating_ip', False):
return
LOG.info("assign floating ips")
ids = self.context["tenants"][tenant_id]["servers"]
for instance_id in ids:
self.assign_floating_ip(tenant_id, client, instance_id)
def assign_floating_ip(self, tenant_id, client, instance_id):
is_vpc = self.config.get("run_in_vpc", False)
kwargs = dict()
if is_vpc:
kwargs['Domain'] = 'vpc'
data = client.allocate_address(*[], **kwargs)
alloc_id = data.get('AllocationId')
public_ip = data['PublicIp']
kwargs = {'InstanceId': instance_id}
if is_vpc:
kwargs['AllocationId'] = alloc_id
else:
kwargs['PublicIp'] = public_ip
try:
data = client.associate_address(*[], **kwargs)
kwargs.pop('InstanceId')
self.context["tenants"][tenant_id]["addresses"].append(kwargs)
except Exception:
LOG.exception('')
kwargs.pop('InstanceId')
data = client.release_address(*[], **kwargs)
def terminate_instances_and_wait(self, tenant_id, client):
ids = self.context["tenants"][tenant_id].get("servers", [])
servers_per_run = self.config["servers_per_run"]
mod = len(ids) / servers_per_run
for i in range(0, mod):
part_ids = ids[i * servers_per_run:(i + 1) * servers_per_run]
data = client.terminate_instances(InstanceIds=part_ids)
part_ids = ids[mod * servers_per_run:]
if part_ids:
data = client.terminate_instances(InstanceIds=part_ids)
start_time = time.time()
while True:
try:
data = client.describe_instances(InstanceIds=ids)
except Exception:
break
if (len(data['Reservations']) == 0
or len(data['Reservations'][0]['Instances']) == 0):
break
for instance in data['Reservations'][0]['Instances']:
assert 'error' != instance['State']['Name']
if instance['State']['Name'] != 'terminated':
break
else:
break
time.sleep(5)
dtime = time.time() - start_time
assert dtime <= self.config["build_timeout"]
def release_addresses(self, tenant_id, client):
LOG.info("Cleanup addresses")
kwargss = self.context["tenants"][tenant_id].get("addresses", [])
for kwargs in kwargss:
try:
data = client.release_address(*[], **kwargs)
except Exception:
LOG.exception('')
def cleanup_networks(self, tenant_id, client):
LOG.info("Cleanup networks")
networks = self.context["tenants"][tenant_id].get("networks", [])
for network in networks:
vpc_id = network.get("vpc_id")
gw_id = network.get("gw_id")
if gw_id:
try:
data = client.detach_internet_gateway(
VpcId=vpc_id, InternetGatewayId=gw_id)
except Exception:
LOG.exception('')
time.sleep(1)
try:
data = client.delete_internet_gateway(
InternetGatewayId=gw_id)
except Exception:
LOG.exception('')
time.sleep(1)
ni_ids = network.get("ni_ids")
if ni_ids:
for ni_id in ni_ids:
try:
data = client.delete_network_interface(
NetworkInterfaceId=ni_id)
except Exception:
LOG.exception('')
time.sleep(1)
subnet_id = network.get("subnet_id")
if subnet_id:
try:
data = client.delete_subnet(SubnetId=subnet_id)
except Exception:
LOG.exception('')
time.sleep(1)
if vpc_id:
try:
data = client.delete_vpc(VpcId=vpc_id)
except Exception:
LOG.exception('')
@context.configure(name="ec2api_networks", platform="openstack", order=451)
class FakeNetworkGenerator(EC2Objects):
"""Context class for adding temporary networks for benchmarks.
Networks are added for each tenant.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"subnets_per_tenant": {
"type": "integer",
"minimum": 1
},
"nis_per_subnet": {
"type": "integer",
"minimum": 1
},
},
"additionalProperties": False
}
DEFAULT_CONFIG = {
"subnets_per_tenant": 5,
"nis_per_subnet": 5,
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `EC2 Networks`"))
def setup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
LOG.info("Creating networks for user tenant %s "
% (user["tenant_id"]))
args = user['ec2args']
client = botocoreclient.get_ec2_client(
args['url'], args['region'], args['access'], args['secret'])
self.context["tenants"][tenant_id]["networks"] = list()
subnets_count = self.config["subnets_per_tenant"]
nis_count = self.config["nis_per_subnet"]
for dummy in range(0, subnets_count):
self.prepare_network(tenant_id, client, nis_count)
@logging.log_task_wrapper(LOG.info, _("Exit context: `EC2 Networks`"))
def cleanup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
args = user['ec2args']
client = botocoreclient.get_ec2_client(
args['url'], args['region'], args['access'], args['secret'])
self.cleanup_networks(tenant_id, client)
@context.configure(name="ec2api_servers", platform="openstack", order=450)
class FakeServerGenerator(EC2Objects):
"""Context class for adding temporary servers for benchmarks.
Servers are added for each tenant.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"image": {
"type": "string",
},
"flavor": {
"type": "string"
},
"servers_per_tenant": {
"type": "integer",
"minimum": 1
},
"run_in_vpc": {
"type": "boolean"
},
"assign_floating_ip": {
"type": "boolean"
},
"build_timeout": {
"type": "integer",
"minimum": 30
},
"servers_per_run": {
"type": "integer",
"minimum": 1
}
},
"required": ["image", "flavor"],
"additionalProperties": False
}
DEFAULT_CONFIG = {
"servers_per_tenant": 5,
"build_timeout": 30,
"servers_per_run": 10
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `EC2 Servers`"))
def setup(self):
image = self.config["image"]
image_id = None
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
LOG.info("Booting servers for user tenant %s "
% (user["tenant_id"]))
args = user['ec2args']
client = botocoreclient.get_ec2_client(
args['url'], args['region'], args['access'], args['secret'])
if image_id is None:
data = client.describe_images(
Filters=[{'Name': 'name', 'Values': [image]},
{'Name': 'image-type', 'Values': ['machine']}])
image_id = data['Images'][0]['ImageId']
self.context["tenants"][tenant_id]["servers"] = list()
self.context["tenants"][tenant_id]["networks"] = list()
self.run_instances(tenant_id, client, image_id)
self.wait_for_instances(tenant_id, client)
self.assign_floating_ips(tenant_id, client)
@logging.log_task_wrapper(LOG.info, _("Exit context: `EC2 Servers`"))
def cleanup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
args = user['ec2args']
client = botocoreclient.get_ec2_client(
args['url'], args['region'], args['access'], args['secret'])
self.terminate_instances_and_wait(tenant_id, client)
self.release_addresses(tenant_id, client)
self.cleanup_networks(tenant_id, client)

View File

@@ -1,90 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally.task import context
from rally_openstack import consts
from rally_openstack import osclients
from ec2api.tests import botocoreclient
LOG = logging.getLogger(__name__)
@context.configure(name="fake_images", platform="openstack", order=411)
class FakeImageGenerator(context.Context):
"""Context class for adding images to each user for benchmarks."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"disk_format": {
"enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
"ari", "ami"],
},
"container_format": {
"type": "string",
},
"images_per_tenant": {
"type": "integer",
"minimum": 1
},
},
"required": ["disk_format", "container_format", "images_per_tenant"],
"additionalProperties": False
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `Images`"))
def setup(self):
disk_format = self.config["disk_format"]
container_format = self.config["container_format"]
images_per_tenant = self.config["images_per_tenant"]
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
glance = osclients.Clients(user["credential"]).glance().images
current_images = []
for i in range(images_per_tenant):
kw = {
"name": "image-" + tenant_id[0:8] + "-" + str(i),
"container_format": container_format,
"disk_format": disk_format,
}
image = glance.create(**kw)
glance.upload(image.id, '', image_size=1000000)
current_images.append(image.id)
self.context["tenants"][tenant_id]["images"] = current_images
# NOTE(andrey-mp): call ec2 api to initialize it
args = user['ec2args']
client = botocoreclient.get_ec2_client(
args['url'], args['region'], args['access'], args['secret'])
data = client.describe_images()
@logging.log_task_wrapper(LOG.info, _("Exit context: `Images`"))
def cleanup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
glance = osclients.Clients(user["credential"]).glance().images
for image in self.context["tenants"][tenant_id].get("images", []):
with logging.ExceptionLogger(
LOG,
_("Failed to delete network for tenant %s")
% tenant_id):
glance.delete(image)

View File

@@ -1,224 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import atomic
from rally_openstack import osclients
from rally_openstack import scenario
from ec2api.tests import botocoreclient
LOG = logging.getLogger(__name__)
_resources = dict()
class EC2APIPlugin(scenario.OpenStackScenario):
def __init__(self, *args, **kwargs):
super(EC2APIPlugin, self).__init__(*args, **kwargs)
if 'instance_id' in _resources:
self.instance_id = _resources['instance_id']
else:
client = self.get_ec2_client()
data = client.describe_instances()
instances = (data['Reservations'][0]['Instances']
if data.get('Reservations') else None)
if instances:
index = len(instances) / 3
self.instance_id = instances[index]['InstanceId']
LOG.info("found instance = %s for ec2" % (self.instance_id))
_resources['instance_id'] = self.instance_id
else:
_resources['instance_id'] = None
if 'nova_server_id' in _resources:
self.nova_server_id = _resources['nova_server_id']
else:
client = osclients.Clients(
self.context['user']['credential']).nova()
project_id = self.context["tenant"]["id"]
servers = client.servers.list(
search_opts={'project_id': project_id})
if servers:
index = len(servers) / 3
self.nova_server_id = servers[index].id
LOG.info("found server = %s for nova" % (self.nova_server_id))
_resources['nova_server_id'] = self.nova_server_id
else:
_resources['nova_server_id'] = None
def get_ec2_client(self):
args = self.context['user']['ec2args']
client = botocoreclient.get_ec2_client(
args['url'], args['region'], args['access'], args['secret'])
return client
def describe_images_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_images_ec2api'):
data = client.describe_images()
def describe_images_glance(self):
client = osclients.Clients(
self.context['user']['credential']).glance().images
with atomic.ActionTimer(self, 'describe_images_glance'):
data = list(client.list())
def describe_addresses_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_addresses_ec2api'):
data = client.describe_addresses()
def describe_instances_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_instances_ec2api'):
data = client.describe_instances()
def describe_one_instance_ec2api(self):
if not self.instance_id:
return
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_one_instance_ec2api'):
data = client.describe_instances(InstanceIds=[self.instance_id])
def describe_instances_nova(self):
nova = osclients.Clients(
self.context['user']['credential']).nova()
project_id = self.context["tenant"]["id"]
with atomic.ActionTimer(self, 'describe_instances_nova'):
nova.servers.list(search_opts={'project_id': project_id})
def describe_one_instance_nova(self):
if not self.nova_server_id:
return
nova = osclients.Clients(
self.context['user']['credential']).nova()
with atomic.ActionTimer(self, 'describe_one_instance_nova'):
nova.servers.get(self.nova_server_id)
def describe_vpcs_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_vpcs_ec2api'):
data = client.describe_vpcs()
def describe_subnets_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_subnets_ec2api'):
data = client.describe_subnets()
def describe_network_interfaces_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_network_interfaces_ec2api'):
data = client.describe_network_interfaces()
def describe_route_tables_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_route_tables_ec2api'):
data = client.describe_route_tables()
def describe_security_groups_ec2api(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_security_groups_ec2api'):
data = client.describe_security_groups()
def describe_floatingips_neutron(self):
neutron = osclients.Clients(
self.context['user']['credential']).neutron()
project_id = self.context["tenant"]["id"]
with atomic.ActionTimer(self, 'describe_addesses_neutron'):
neutron.list_floatingips(tenant_id=project_id)
def describe_networks_neutron(self):
neutron = osclients.Clients(
self.context['user']['credential']).neutron()
project_id = self.context["tenant"]["id"]
with atomic.ActionTimer(self, 'describe_networks_neutron'):
neutron.list_networks(tenant_id=project_id)
def describe_subnets_neutron(self):
neutron = osclients.Clients(
self.context['user']['credential']).neutron()
project_id = self.context["tenant"]["id"]
with atomic.ActionTimer(self, 'describe_subnets_neutron'):
neutron.list_subnets(tenant_id=project_id)
def describe_ports_neutron(self):
neutron = osclients.Clients(
self.context['user']['credential']).neutron()
project_id = self.context["tenant"]["id"]
with atomic.ActionTimer(self, 'describe_ports_neutron'):
neutron.list_ports(tenant_id=project_id)
def describe_security_groups_neutron(self):
neutron = osclients.Clients(
self.context['user']['credential']).neutron()
project_id = self.context["tenant"]["id"]
with atomic.ActionTimer(self, 'describe_security_groups_neutron'):
neutron.list_security_groups(tenant_id=project_id)
@scenario.configure(name="EC2APIPlugin.describe_images", platform="openstack")
class DescribeImages(EC2APIPlugin):
def run(self):
self.describe_images_ec2api()
self.describe_images_glance()
@scenario.configure(name="EC2APIPlugin.describe_regions", platform="openstack")
class DescribeRegions(EC2APIPlugin):
def run(self):
client = self.get_ec2_client()
with atomic.ActionTimer(self, 'describe_regions_ec2api'):
data = client.describe_regions()
@scenario.configure(name="EC2APIPlugin.describe_all_in_one",
platform="openstack")
class DescribeAllInOne(EC2APIPlugin):
def run(self):
self.describe_addresses_ec2api()
self.describe_floatingips_neutron()
self.describe_instances_ec2api()
self.describe_one_instance_ec2api()
self.describe_instances_nova()
self.describe_one_instance_nova()
self.describe_vpcs_ec2api()
self.describe_subnets_ec2api()
self.describe_network_interfaces_ec2api()
self.describe_route_tables_ec2api()
self.describe_security_groups_ec2api()
self.describe_networks_neutron()
self.describe_subnets_neutron()
self.describe_ports_neutron()
self.describe_security_groups_neutron()
@scenario.configure(name="EC2APIPlugin.describe_networks",
platform="openstack")
class DescribeNetworks(EC2APIPlugin):
def run(self):
self.describe_vpcs_ec2api()
self.describe_subnets_ec2api()
self.describe_network_interfaces_ec2api()
self.describe_route_tables_ec2api()
self.describe_security_groups_ec2api()
self.describe_networks_neutron()
self.describe_subnets_neutron()
self.describe_ports_neutron()
self.describe_security_groups_neutron()

View File

@@ -1,3 +0,0 @@
#!/bin/bash -x
# Any preparation commands