Delete tempest code in repo and use python-tempest instead

Change-Id: Id6834d1d87543d94bd4b8e7fd0acec69a5354cb9
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2017-08-09 13:43:55 +08:00
parent caabfbf1f6
commit 8beea33471
209 changed files with 18 additions and 21561 deletions
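The recurring change in the per-file hunks below is the same: test modules stop importing the in-repo tempest copy and its configuration, and import the local daisy_tempest helpers instead. In summary (import lines taken from the diffs that follow):

    # Before: in-repo tempest copy, endpoint read from tempest config
    from tempest.api.daisy import base
    from tempest import config
    CONF = config.CONF

    # After: local daisy_tempest package, no tempest config dependency
    from daisy_tempest import base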


@@ -16,9 +16,7 @@ from oslo_log import log as logging
-from tempest import config
 import tempest.test
 from daisyclient.v1 import client as daisy_client
-from tempest.api.daisy.v1.fake.logical_network_fake import FakeDiscoverHosts
-CONF = config.CONF
+from daisy_tempest.v1.fake.logical_network_fake import FakeDiscoverHosts
 LOG = logging.getLogger(__name__)
@@ -33,7 +31,7 @@ class BaseDaisyTest(tempest.test.BaseTestCase):
     def resource_setup(cls):
         super(BaseDaisyTest, cls).resource_setup()
         cls.daisy_version = 1.0
-        cls.daisy_endpoint = CONF.daisy.daisy_endpoint
+        cls.daisy_endpoint = "http://127.0.0.1:19292"
         cls.daisy_client = daisy_client.Client(version=cls.daisy_version,
                                                endpoint=cls.daisy_endpoint)
@@ -533,3 +531,11 @@ class BaseDaisyTest(tempest.test.BaseTestCase):
     @classmethod
     def add_fake_node(cls, num):
         return cls.daisy_client.hosts.add(**FakeDiscoverHosts.daisy_data[num])
+
+    def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
+        try:
+            func(*args, **kwargs)
+        except Exception as e:
+            self.assertEqual(msg, str(e))
+            self.assertTrue(isinstance(e, exc),
+                            "Expected %s, got %s" % (exc, type(e)))

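For orientation, a minimal sketch of how a test case might use the updated base class; the module, the hosts.get() call, the exception class and the message text are illustrative assumptions, not part of this commit:

    from daisyclient import exc as client_exc

    from daisy_tempest import base


    class DaisyFakeHostTest(base.BaseDaisyTest):

        def test_add_fake_node(self):
            # add_fake_node() posts FakeDiscoverHosts.daisy_data[0] through
            # the daisy client that resource_setup() builds against the
            # hard-coded endpoint above.
            host = self.add_fake_node(0)
            self.assertTrue(host)

        def test_error_message_is_reported(self):
            # assertRaisesMessage() checks both the exception type and the
            # exact message text (hypothetical values shown here).
            self.assertRaisesMessage(client_exc.HTTPNotFound,
                                     "404 Not Found",
                                     self.daisy_client.hosts.get,
                                     "no-such-host-id")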

@@ -1,12 +1,9 @@
-from tempest.api.daisy import base
-from tempest import config
+from daisy_tempest import base
 from nose.tools import set_trace
 from daisyclient import exc as client_exc
 import copy
 from fake.logical_network_fake import FakeLogicNetwork as logical_fake
-CONF = config.CONF
 class DaisyCinderVolumeTest(base.BaseDaisyTest):


@@ -16,12 +16,10 @@
 import logging
-from tempest.api.daisy import base
-from tempest import config
+from daisy_tempest import base
 from fake.logical_network_fake import FakeLogicNetwork as logical_fake
 import copy
-CONF = config.CONF
 LOG = logging.getLogger(__name__)


@@ -1,8 +1,6 @@
-from tempest.api.daisy import base
-from tempest import config
-CONF = config.CONF
+from daisy_tempest import base
 class DaisyComponentTest(base.BaseDaisyTest):


@@ -13,9 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from tempest.api.daisy import base
-from tempest import config
-CONF = config.CONF
+from daisy_tempest import base
 class DaisyConfigFileTest(base.BaseDaisyTest):


@@ -13,12 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from tempest.api.daisy import base
-from tempest import config
+from daisy_tempest import base
 import time
 from daisyclient import exc as client_exc
 from fake.logical_network_fake import FakeLogicNetwork as logical_fake
-CONF = config.CONF
 class DaisyDiscoverHostTest(base.BaseDaisyTest):
@@ -102,7 +100,7 @@ class DaisyDiscoverHostTest(base.BaseDaisyTest):
         pass
     def test_discover_host(self):
-        daisy_endpoint = CONF.daisy.daisy_endpoint
+        daisy_endpoint = "http://127.0.0.1:19292"
         def GetMiddleStr(content, startStr, endStr):
             startIndex = content.index(startStr)


@@ -2,12 +2,9 @@
 import copy
 from daisyclient import exc as client_exc
-from tempest.api.daisy import base
-from tempest import config
+from daisy_tempest import base
 from fake.logical_network_fake import FakeLogicNetwork as logical_fake
-CONF = config.CONF
 class TecsLogicalNetworkTest(base.BaseDaisyTest):
     LOGICAL_FILTER = ['name', 'physnet_name', 'segmentation_id',


@@ -1,8 +1,6 @@
-from tempest.api.daisy import base
-from tempest import config
-CONF = config.CONF
+from daisy_tempest import base
 class DaisyServiceTest(base.BaseDaisyTest):

File diff suppressed because it is too large


@@ -1,8 +0,0 @@
[DEFAULT]
# The list of modules to copy from openstack-common
module=install_venv_common
module=versionutils
# The base module to hold the copy of openstack.common
base=tempest


@@ -1,26 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
testtools>=0.9.36,!=1.2.0
boto>=2.32.1
paramiko>=1.13.0
netaddr>=0.7.12
python-glanceclient>=0.15.0
python-cinderclient>=1.1.0
python-heatclient>=0.3.0
testrepository>=0.0.18
oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0
oslo.config>=1.9.3,<1.10.0 # Apache-2.0
oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0
oslo.log>=1.0.0,<1.1.0 # Apache-2.0
oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0
oslo.utils>=1.4.0,<1.5.0 # Apache-2.0
six>=1.9.0
iso8601>=0.1.9
fixtures>=0.3.14
testscenarios>=0.4
tempest-lib>=0.4.0


@@ -1,146 +0,0 @@
#!/usr/bin/env bash
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Tempest test suite"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
echo " -t, --serial Run testr serially"
echo " -C, --config Config file location"
echo " -h, --help Print this usage message"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB"
echo " -l, --logging Enable logging"
echo " -L, --logging-config Logging config file location. Default is etc/logging.conf"
echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr "
}
testrargs=""
venv=.venv
with_venv=tools/with_venv.sh
serial=0
always_venv=0
never_venv=0
no_site_packages=0
debug=0
force=0
wrapper=""
config_file=""
update=0
logging=0
logging_config=etc/logging.conf
if ! options=$(getopt -o VNnfusthdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config:,logging,logging-config: -- "$@")
then
# parse error
usage
exit 1
fi
eval set -- $options
first_uu=yes
while [ $# -gt 0 ]; do
case "$1" in
-h|--help) usage; exit;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-n|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
-u|--update) update=1;;
-d|--debug) debug=1;;
-C|--config) config_file=$2; shift;;
-s|--smoke) testrargs+="smoke";;
-t|--serial) serial=1;;
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
*) testrargs="$testrargs $1";;
esac
shift
done
if [ -n "$config_file" ]; then
config_file=`readlink -f "$config_file"`
export TEMPEST_CONFIG_DIR=`dirname "$config_file"`
export TEMPEST_CONFIG=`basename "$config_file"`
fi
if [ $logging -eq 1 ]; then
if [ ! -f "$logging_config" ]; then
echo "No such logging config file: $logging_config"
exit 1
fi
logging_config=`readlink -f "$logging_config"`
export TEMPEST_LOG_CONFIG_DIR=`dirname "$logging_config"`
export TEMPEST_LOG_CONFIG=`basename "$logging_config"`
fi
cd `dirname "$0"`
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function testr_init {
if [ ! -d .testrepository ]; then
${wrapper} testr init
fi
}
function run_tests {
testr_init
${wrapper} find . -type f -name "*.pyc" -delete
export OS_TEST_PATH=./tempest/test_discover
if [ $debug -eq 1 ]; then
if [ "$testrargs" = "" ]; then
testrargs="discover ./tempest/test_discover"
fi
${wrapper} python -m testtools.run $testrargs
return $?
fi
if [ $serial -eq 1 ]; then
${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
else
${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
fi
}
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py $installvenvopts
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
run_tests
retval=$?
exit $retval


@@ -1,150 +0,0 @@
#!/usr/bin/env bash
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Tempest unit tests"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -t, --serial Run testr serially"
echo " -p, --pep8 Just run pep8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB"
echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr "
}
testrargs=""
just_pep8=0
venv=.venv
with_venv=tools/with_venv.sh
serial=0
always_venv=0
never_venv=0
no_site_packages=0
debug=0
force=0
coverage=0
wrapper=""
config_file=""
update=0
if ! options=$(getopt -o VNnfuctphd -l virtual-env,no-virtual-env,no-site-packages,force,update,serial,coverage,pep8,help,debug -- "$@")
then
# parse error
usage
exit 1
fi
eval set -- $options
first_uu=yes
while [ $# -gt 0 ]; do
case "$1" in
-h|--help) usage; exit;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-n|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
-u|--update) update=1;;
-d|--debug) debug=1;;
-p|--pep8) let just_pep8=1;;
-c|--coverage) coverage=1;;
-t|--serial) serial=1;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
*) testrargs="$testrargs $1";;
esac
shift
done
cd `dirname "$0"`
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function testr_init {
if [ ! -d .testrepository ]; then
${wrapper} testr init
fi
}
function run_tests {
testr_init
${wrapper} find . -type f -name "*.pyc" -delete
export OS_TEST_PATH=./tempest/tests
if [ $debug -eq 1 ]; then
if [ "$testrargs" = "" ]; then
testrargs="discover ./tempest/tests"
fi
${wrapper} python -m testtools.run $testrargs
return $?
fi
if [ $coverage -eq 1 ]; then
${wrapper} python setup.py test --coverage
return $?
fi
if [ $serial -eq 1 ]; then
${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
else
${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
fi
}
function run_pep8 {
echo "Running flake8 ..."
if [ $never_venv -eq 1 ]; then
echo "**WARNING**:" >&2
echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2
fi
${wrapper} flake8
}
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py $installvenvopts
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
run_tests
retval=$?
if [ -z "$testrargs" ]; then
run_pep8
fi
exit $retval


@@ -1,36 +0,0 @@
[metadata]
name = tempest
version = 4
summary = OpenStack Integration Testing
description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
[entry_points]
console_scripts =
verify-tempest-config = tempest.cmd.verify_tempest_config:main
javelin2 = tempest.cmd.javelin:main
run-tempest-stress = tempest.cmd.run_stress:main
tempest-cleanup = tempest.cmd.cleanup:main
oslo.config.opts =
tempest.config = tempest.config:list_opts
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[wheel]
universal = 1


@@ -1,30 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)


@@ -1,52 +0,0 @@
.. _api_field_guide:
Tempest Field Guide to API tests
================================
What are these tests?
---------------------
One of Tempest's prime function is to ensure that your OpenStack cloud
works with the OpenStack API as documented. The current largest
portion of Tempest code is devoted to test cases that do exactly this.
It's also important to test not only the expected positive path on
APIs, but also to provide them with invalid data to ensure they fail
in expected and documented ways. Over the course of the OpenStack
project Tempest has discovered many fundamental bugs by doing just
this.
In order for some APIs to return meaningful results, there must be
enough data in the system. This means these tests might start by
spinning up a server, image, etc, then operating on it.
Why are these tests in tempest?
-------------------------------
This is one of the core missions for the Tempest project, and where it
started. Many people use this bit of function in Tempest to ensure
their clouds haven't broken the OpenStack API.
It could be argued that some of the negative testing could be done
back in the projects themselves, and we might evolve there over time,
but currently in the OpenStack gate this is a fundamentally important
place to keep things.
Scope of these tests
--------------------
API tests should always use the Tempest implementation of the
OpenStack API, as we want to ensure that bugs aren't hidden by the
official clients.
They should test specific API calls, and can build up complex state if
it's needed for the API call to be meaningful.
They should send not only good data, but bad data at the API and look
for error codes.
They should all be able to be run on their own, not depending on the
state created by a previous test.


@@ -1,58 +0,0 @@
# (c) 2014 Deutsche Telekom AG
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
common_flavor_details = {
"name": "get-flavor-details",
"http-method": "GET",
"url": "flavors/%s",
"resources": [
{"name": "flavor", "expected_result": 404}
]
}
common_flavor_list = {
"name": "list-flavors-with-detail",
"http-method": "GET",
"url": "flavors/detail",
"json-schema": {
"type": "object",
"properties": {
}
}
}
common_admin_flavor_create = {
"name": "flavor-create",
"http-method": "POST",
"admin_client": True,
"url": "flavors",
"default_result_code": 400,
"json-schema": {
"type": "object",
"properties": {
"flavor": {
"type": "object",
"properties": {
"name": {"type": "string",
"exclude_tests": ["gen_str_min_length"]},
"ram": {"type": "integer", "minimum": 1},
"vcpus": {"type": "integer", "minimum": 1},
"disk": {"type": "integer"},
"id": {"type": "integer",
"exclude_tests": ["gen_none", "gen_string"]
},
}
}
}
}
}


@@ -1,39 +0,0 @@
# (c) 2014 Deutsche Telekom AG
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from tempest.api_schema.request.compute import flavors
flavors_details = copy.deepcopy(flavors.common_flavor_details)
flavor_list = copy.deepcopy(flavors.common_flavor_list)
flavor_create = copy.deepcopy(flavors.common_admin_flavor_create)
flavor_list["json-schema"]["properties"] = {
"minRam": {
"type": "integer",
"results": {
"gen_none": 400,
"gen_string": 400
}
},
"minDisk": {
"type": "integer",
"results": {
"gen_none": 400,
"gen_string": 400
}
}
}


@@ -1,60 +0,0 @@
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
node = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'interfaces': {'type': 'array'},
'host': {'type': 'string'},
'task_state': {'type': ['string', 'null']},
'cpus': {'type': ['integer', 'string']},
'memory_mb': {'type': ['integer', 'string']},
'disk_gb': {'type': ['integer', 'string']},
},
'required': ['id', 'interfaces', 'host', 'task_state', 'cpus', 'memory_mb',
'disk_gb']
}
list_baremetal_nodes = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'nodes': {
'type': 'array',
'items': node
}
},
'required': ['nodes']
}
}
baremetal_node = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'node': node
},
'required': ['node']
}
}
get_baremetal_node = copy.deepcopy(baremetal_node)
get_baremetal_node['response_body']['properties']['node'][
'properties'].update({'instance_uuid': {'type': ['string', 'null']}})
get_baremetal_node['response_body']['properties']['node'][
'required'].append('instance_uuid')
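These response-schema modules (this one and the similar files below) were consumed by the in-tree tempest REST client, which asserts the HTTP status code and validates the decoded JSON body with the jsonschema library. A minimal, self-contained sketch of that pattern, using a trimmed-down schema and a made-up response for illustration:

    import jsonschema

    # Same shape as list_baremetal_nodes above: allowed status codes plus a
    # JSON Schema for the response body.
    schema = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {'nodes': {'type': 'array'}},
            'required': ['nodes'],
        },
    }


    def validate_response(schema, status, body):
        # Check the status code first, then validate the JSON body when a
        # body schema is defined.
        if status not in schema['status_code']:
            raise AssertionError('unexpected status %s' % status)
        if 'response_body' in schema:
            jsonschema.validate(body, schema['response_body'])


    validate_response(schema, 200, {'nodes': []})  # made-up response body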


@@ -1,34 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
add_remove_list_flavor_access = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'flavor_access': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'flavor_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
},
'required': ['flavor_id', 'tenant_id'],
}
}
},
'required': ['flavor_access']
}
}


@@ -1,39 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
flavor_extra_specs = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'extra_specs': {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
},
'required': ['extra_specs']
}
}
flavor_extra_specs_key = {
'status_code': [200],
'response_body': {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
}


@@ -1,50 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_migrations = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'migrations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'status': {'type': 'string'},
'instance_uuid': {'type': 'string'},
'source_node': {'type': 'string'},
'source_compute': {'type': 'string'},
'dest_node': {'type': 'string'},
'dest_compute': {'type': 'string'},
'dest_host': {'type': 'string'},
# zfl : tecs 1.0 old_instance_type_id is None
# 'old_instance_type_id': {'type': 'integer'},
'new_instance_type_id': {'type': 'integer'},
'created_at': {'type': 'string'},
'updated_at': {'type': ['string', 'null']}
},
'required': [
'id', 'status', 'instance_uuid', 'source_node',
'source_compute', 'dest_node', 'dest_compute',
'dest_host', 'old_instance_type_id',
'new_instance_type_id', 'created_at', 'updated_at'
]
}
}
},
'required': ['migrations']
}
}


@@ -1,81 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
links = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
'format': 'uri'
},
'rel': {'type': 'string'}
},
'required': ['href', 'rel']
}
}
mac_address = {
'type': 'string',
'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
}
access_ip_v4 = {
'type': 'string',
'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
}
access_ip_v6 = {
'type': 'string',
'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
}
addresses = {
'type': 'object',
'patternProperties': {
# NOTE: Here is for 'private' or something.
'^[a-zA-Z0-9-_.]+$': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'version': {'type': 'integer'},
'addr': {
'type': 'string',
'anyOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
},
'required': ['version', 'addr']
}
}
}
}
response_header = {
'connection': {'type': 'string'},
'content-length': {'type': 'string'},
'content-type': {'type': 'string'},
'status': {'type': 'string'},
'x-compute-request-id': {'type': 'string'},
'vary': {'type': 'string'},
'x-openstack-nova-api-version': {'type': 'string'},
'date': {
'type': 'string',
'format': 'data-time'
}
}


@@ -1,61 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_services = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'services': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'string'],
'pattern': '^[a-zA-Z!]*@[0-9]+$'},
'zone': {'type': 'string'},
'host': {'type': 'string'},
'state': {'type': 'string'},
'binary': {'type': 'string'},
'status': {'type': 'string'},
'updated_at': {'type': ['string', 'null']},
'disabled_reason': {'type': ['string', 'null']}
},
'required': ['id', 'zone', 'host', 'state', 'binary',
'status', 'updated_at', 'disabled_reason']
}
}
},
'required': ['services']
}
}
enable_service = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'service': {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'binary': {'type': 'string'},
'host': {'type': 'string'}
},
'required': ['status', 'binary', 'host']
}
},
'required': ['service']
}
}


@@ -1,57 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_agent_info = {
'type': 'object',
'properties': {
'agent_id': {'type': ['integer', 'string']},
'hypervisor': {'type': 'string'},
'os': {'type': 'string'},
'architecture': {'type': 'string'},
'version': {'type': 'string'},
'url': {'type': 'string', 'format': 'uri'},
'md5hash': {'type': 'string'}
},
'required': ['agent_id', 'hypervisor', 'os', 'architecture',
'version', 'url', 'md5hash']
}
list_agents = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'agents': {
'type': 'array',
'items': common_agent_info
}
},
'required': ['agents']
}
}
create_agent = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'agent': common_agent_info
},
'required': ['agent']
}
}
delete_agent = {
'status_code': [200]
}


@@ -1,88 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# create-aggregate api doesn't have 'hosts' and 'metadata' attributes.
aggregate_for_create = {
'type': 'object',
'properties': {
'availability_zone': {'type': ['string', 'null']},
'created_at': {'type': 'string'},
'deleted': {'type': 'boolean'},
'deleted_at': {'type': ['string', 'null']},
'id': {'type': 'integer'},
'name': {'type': 'string'},
'updated_at': {'type': ['string', 'null']}
},
'required': ['availability_zone', 'created_at', 'deleted',
'deleted_at', 'id', 'name', 'updated_at'],
}
common_aggregate_info = copy.deepcopy(aggregate_for_create)
common_aggregate_info['properties'].update({
'hosts': {'type': 'array'},
'metadata': {'type': 'object'}
})
common_aggregate_info['required'].extend(['hosts', 'metadata'])
list_aggregates = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'aggregates': {
'type': 'array',
'items': common_aggregate_info
}
},
'required': ['aggregates'],
}
}
get_aggregate = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'aggregate': common_aggregate_info
},
'required': ['aggregate'],
}
}
aggregate_set_metadata = get_aggregate
# The 'updated_at' attribute of 'update_aggregate' can't be null.
update_aggregate = copy.deepcopy(get_aggregate)
update_aggregate['response_body']['properties']['aggregate']['properties'][
'updated_at'] = {
'type': 'string'
}
delete_aggregate = {
'status_code': [200]
}
create_aggregate = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'aggregate': aggregate_for_create
},
'required': ['aggregate'],
}
}
aggregate_add_remove_host = get_aggregate


@@ -1,74 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
base = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'availabilityZoneInfo': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'zoneName': {'type': 'string'},
'zoneState': {
'type': 'object',
'properties': {
'available': {'type': 'boolean'}
},
'required': ['available']
},
# NOTE: Here is the difference between detail and
# non-detail.
'hosts': {'type': 'null'}
},
'required': ['zoneName', 'zoneState', 'hosts']
}
}
},
'required': ['availabilityZoneInfo']
}
}
detail = {
'type': 'object',
'patternProperties': {
# NOTE: Here is for a hostname
'^[a-zA-Z0-9-_.]+$': {
'type': 'object',
'patternProperties': {
# NOTE: Here is for a service name
'^.*$': {
'type': 'object',
'properties': {
'available': {'type': 'boolean'},
'active': {'type': 'boolean'},
'updated_at': {'type': ['string', 'null']}
},
'required': ['available', 'active', 'updated_at']
}
}
}
}
}
list_availability_zone_list = copy.deepcopy(base)
list_availability_zone_list_detail = copy.deepcopy(base)
list_availability_zone_list_detail['response_body']['properties'][
'availabilityZoneInfo']['items']['properties']['hosts'] = detail


@@ -1,39 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
_common_schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'certificate': {
'type': 'object',
'properties': {
'data': {'type': 'string'},
'private_key': {'type': 'string'},
},
'required': ['data', 'private_key']
}
},
'required': ['certificate']
}
}
get_certificate = copy.deepcopy(_common_schema)
get_certificate['response_body']['properties']['certificate'][
'properties']['private_key'].update({'type': 'null'})
create_certificate = copy.deepcopy(_common_schema)


@@ -1,45 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_extensions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'extensions': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'updated': {
'type': 'string',
'format': 'data-time'
},
'name': {'type': 'string'},
'links': {'type': 'array'},
'namespace': {
'type': 'string',
'format': 'uri'
},
'alias': {'type': 'string'},
'description': {'type': 'string'}
},
'required': ['updated', 'name', 'links', 'namespace',
'alias', 'description']
}
}
},
'required': ['extensions']
}
}


@@ -1,41 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_fixed_ip = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'fixed_ip': {
'type': 'object',
'properties': {
'address': {
'type': 'string',
'format': 'ip-address'
},
'cidr': {'type': 'string'},
'host': {'type': 'string'},
'hostname': {'type': 'string'}
},
'required': ['address', 'cidr', 'host', 'hostname']
}
},
'required': ['fixed_ip']
}
}
reserve_fixed_ip = {
'status_code': [202],
'response_body': {'type': 'string'}
}


@@ -1,98 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.response.compute import parameter_types
list_flavors = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'flavors': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'links': parameter_types.links,
'id': {'type': 'string'}
},
'required': ['name', 'links', 'id']
}
},
'flavors_links': parameter_types.links
},
# NOTE(gmann): flavors_links attribute is not necessary
# to be present always So it is not 'required'.
'required': ['flavors']
}
}
common_flavor_info = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'links': parameter_types.links,
'ram': {'type': 'integer'},
'vcpus': {'type': 'integer'},
# 'swap' attributes comes as integer value but if it is empty
# it comes as "". So defining type of as string and integer.
'swap': {'type': ['integer', 'string']},
'disk': {'type': 'integer'},
'id': {'type': 'string'},
'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
'os-flavor-access:is_public': {'type': 'boolean'},
'rxtx_factor': {'type': 'number'},
'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}
},
# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and
# 'OS-FLV-EXT-DATA' are API extensions. So they are not 'required'.
'required': ['name', 'links', 'ram', 'vcpus', 'swap', 'disk', 'id']
}
list_flavors_details = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'flavors': {
'type': 'array',
'items': common_flavor_info
},
# NOTE(gmann): flavors_links attribute is not necessary
# to be present always So it is not 'required'.
'flavors_links': parameter_types.links
},
'required': ['flavors']
}
}
unset_flavor_extra_specs = {
'status_code': [200]
}
create_get_flavor_details = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'flavor': common_flavor_info
},
'required': ['flavor']
}
}
delete_flavor = {
'status_code': [202]
}


@@ -1,148 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_floating_ip_info = {
'type': 'object',
'properties': {
# NOTE: Now the type of 'id' is integer, but
# here allows 'string' also because we will be
# able to change it to 'uuid' in the future.
'id': {'type': ['integer', 'string']},
'pool': {'type': ['string', 'null']},
'instance_id': {'type': ['string', 'null']},
'ip': {
'type': 'string',
'format': 'ip-address'
},
'fixed_ip': {
'type': ['string', 'null'],
'format': 'ip-address'
}
},
'required': ['id', 'pool', 'instance_id',
'ip', 'fixed_ip'],
}
list_floating_ips = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips': {
'type': 'array',
'items': common_floating_ip_info
},
},
'required': ['floating_ips'],
}
}
floating_ip = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip': common_floating_ip_info
},
'required': ['floating_ip'],
}
}
floating_ip_pools = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip_pools': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'}
},
'required': ['name'],
}
}
},
'required': ['floating_ip_pools'],
}
}
add_remove_floating_ip = {
'status_code': [202]
}
create_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips_bulk_create': {
'type': 'object',
'properties': {
'interface': {'type': ['string', 'null']},
'ip_range': {'type': 'string'},
'pool': {'type': ['string', 'null']},
},
'required': ['interface', 'ip_range', 'pool'],
}
},
'required': ['floating_ips_bulk_create'],
}
}
delete_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips_bulk_delete': {'type': 'string'}
},
'required': ['floating_ips_bulk_delete'],
}
}
list_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip_info': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'address': {
'type': 'string',
'format': 'ip-address'
},
'instance_uuid': {'type': ['string', 'null']},
'interface': {'type': ['string', 'null']},
'pool': {'type': ['string', 'null']},
'project_id': {'type': ['string', 'null']},
'fixed_ip': {
'type': ['string', 'null'],
'format': 'ip-address'
}
},
# NOTE: fixed_ip is introduced after JUNO release,
# So it is not defined as 'required'.
'required': ['address', 'instance_uuid', 'interface',
'pool', 'project_id'],
}
}
},
'required': ['floating_ip_info'],
}
}


@@ -1,109 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
list_hosts = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hosts': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'host_name': {'type': 'string'},
'service': {'type': 'string'},
'zone': {'type': 'string'}
},
'required': ['host_name', 'service', 'zone']
}
}
},
'required': ['hosts']
}
}
get_host_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'host': {
'type': 'array',
'item': {
'type': 'object',
'properties': {
'resource': {
'type': 'object',
'properties': {
'cpu': {'type': 'integer'},
'disk_gb': {'type': 'integer'},
'host': {'type': 'string'},
'memory_mb': {'type': 'integer'},
'project': {'type': 'string'}
},
'required': ['cpu', 'disk_gb', 'host',
'memory_mb', 'project']
}
},
'required': ['resource']
}
}
},
'required': ['host']
}
}
startup_host = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'power_action': {'enum': ['startup']}
},
'required': ['host', 'power_action']
}
}
# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
shutdown_host = copy.deepcopy(startup_host)
shutdown_host['response_body']['properties']['power_action'] = {
'enum': ['shutdown']
}
# The 'power_action' attribute of 'reboot_host' API is 'reboot'
reboot_host = copy.deepcopy(startup_host)
reboot_host['response_body']['properties']['power_action'] = {
'enum': ['reboot']
}
update_host = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'maintenance_mode': {'enum': ['on_maintenance',
'off_maintenance']},
'status': {'enum': ['enabled', 'disabled']}
},
'required': ['host', 'maintenance_mode', 'status']
}
}


@@ -1,185 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
get_hypervisor_statistics = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hypervisor_statistics': {
'type': 'object',
'properties': {
'count': {'type': 'integer'},
'current_workload': {'type': 'integer'},
'disk_available_least': {'type': ['integer', 'null']},
'free_disk_gb': {'type': 'integer'},
'free_ram_mb': {'type': 'integer'},
'local_gb': {'type': 'integer'},
'local_gb_used': {'type': 'integer'},
'memory_mb': {'type': 'integer'},
'memory_mb_used': {'type': 'integer'},
'running_vms': {'type': 'integer'},
'vcpus': {'type': 'integer'},
'vcpus_used': {'type': 'integer'}
},
'required': ['count', 'current_workload',
'disk_available_least', 'free_disk_gb',
'free_ram_mb', 'local_gb', 'local_gb_used',
'memory_mb', 'memory_mb_used', 'running_vms',
'vcpus', 'vcpus_used']
}
},
'required': ['hypervisor_statistics']
}
}
hypervisor_detail = {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'state': {'type': 'string'},
'cpu_info': {'type': 'string'},
'current_workload': {'type': 'integer'},
'disk_available_least': {'type': ['integer', 'null']},
'host_ip': {
'type': 'string',
'format': 'ip-address'
},
'free_disk_gb': {'type': 'integer'},
'free_ram_mb': {'type': 'integer'},
'hypervisor_hostname': {'type': 'string'},
'hypervisor_type': {'type': 'string'},
'hypervisor_version': {'type': 'integer'},
'id': {'type': ['integer', 'string']},
'local_gb': {'type': 'integer'},
'local_gb_used': {'type': 'integer'},
'memory_mb': {'type': 'integer'},
'memory_mb_used': {'type': 'integer'},
'running_vms': {'type': 'integer'},
'service': {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'id': {'type': ['integer', 'string']},
'disabled_reason': {'type': ['string', 'null']}
},
'required': ['host', 'id']
},
'vcpus': {'type': 'integer'},
'vcpus_used': {'type': 'integer'}
},
# NOTE: When loading os-hypervisor-status extension,
# a response contains status and state. So these params
# should not be required.
'required': ['cpu_info', 'current_workload',
'disk_available_least', 'host_ip',
'free_disk_gb', 'free_ram_mb',
'hypervisor_hostname', 'hypervisor_type',
'hypervisor_version', 'id', 'local_gb',
'local_gb_used', 'memory_mb', 'memory_mb_used',
'running_vms', 'service', 'vcpus', 'vcpus_used']
}
list_hypervisors_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hypervisors': {
'type': 'array',
'items': hypervisor_detail
}
},
'required': ['hypervisors']
}
}
get_hypervisor = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hypervisor': hypervisor_detail
},
'required': ['hypervisor']
}
}
list_search_hypervisors = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hypervisors': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'state': {'type': 'string'},
'id': {'type': ['integer', 'string']},
'hypervisor_hostname': {'type': 'string'}
},
# NOTE: When loading os-hypervisor-status extension,
# a response contains status and state. So these params
# should not be required.
'required': ['id', 'hypervisor_hostname']
}
}
},
'required': ['hypervisors']
}
}
get_hypervisor_uptime = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hypervisor': {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'state': {'type': 'string'},
'id': {'type': ['integer', 'string']},
'hypervisor_hostname': {'type': 'string'},
'uptime': {'type': 'string'}
},
# NOTE: When loading os-hypervisor-status extension,
# a response contains status and state. So these params
# should not be required.
'required': ['id', 'hypervisor_hostname', 'uptime']
}
},
'required': ['hypervisor']
}
}
get_hypervisors_servers = copy.deepcopy(list_search_hypervisors)
get_hypervisors_servers['response_body']['properties']['hypervisors']['items'][
'properties']['servers'] = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'uuid': {'type': 'string'},
'name': {'type': 'string'}
}
}
}
# In V2 API, if there is no servers (VM) on the Hypervisor host then 'servers'
# attribute will not be present in response body So it is not 'required'.


@@ -1,146 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute import parameter_types
image_links = copy.deepcopy(parameter_types.links)
image_links['items']['properties'].update({'type': {'type': 'string'}})
common_image_schema = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'status': {'type': 'string'},
'updated': {'type': 'string'},
'links': image_links,
'name': {'type': 'string'},
'created': {'type': 'string'},
'minDisk': {'type': 'integer'},
'minRam': {'type': 'integer'},
'progress': {'type': 'integer'},
'metadata': {'type': 'object'},
'server': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
},
'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
'OS-DCF:diskConfig': {'type': 'string'}
},
# 'server' attributes only comes in response body if image is
# associated with any server. 'OS-EXT-IMG-SIZE:size' & 'OS-DCF:diskConfig'
# are API extension, So those are not defined as 'required'.
'required': ['id', 'status', 'updated', 'links', 'name',
'created', 'minDisk', 'minRam', 'progress',
'metadata']
}
get_image = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'image': common_image_schema
},
'required': ['image']
}
}
list_images = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'images': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': image_links,
'name': {'type': 'string'}
},
'required': ['id', 'links', 'name']
}
},
'images_links': parameter_types.links
},
# NOTE(gmann): images_links attribute is not necessary to be
# present always So it is not 'required'.
'required': ['images']
}
}
create_image = {
'status_code': [202],
'response_header': {
'type': 'object',
'properties': parameter_types.response_header
}
}
create_image['response_header']['properties'].update(
{'location': {
'type': 'string',
'format': 'uri'}
}
)
create_image['response_header']['required'] = ['location']
delete = {
'status_code': [204]
}
image_metadata = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'metadata': {'type': 'object'}
},
'required': ['metadata']
}
}
image_meta_item = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'meta': {'type': 'object'}
},
'required': ['meta']
}
}
list_images_details = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'images': {
'type': 'array',
'items': common_image_schema
},
'images_links': parameter_types.links
},
# NOTE(gmann): images_links attribute is not necessary to be
# present always So it is not 'required'.
'required': ['images']
}
}


@@ -1,59 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_instance_usage_audit_log = {
'type': 'object',
'properties': {
'hosts_not_run': {
'type': 'array',
'items': {'type': 'string'}
},
'log': {'type': 'object'},
'num_hosts': {'type': 'integer'},
'num_hosts_done': {'type': 'integer'},
'num_hosts_not_run': {'type': 'integer'},
'num_hosts_running': {'type': 'integer'},
'overall_status': {'type': 'string'},
'period_beginning': {'type': 'string'},
'period_ending': {'type': 'string'},
'total_errors': {'type': 'integer'},
'total_instances': {'type': 'integer'}
},
'required': ['hosts_not_run', 'log', 'num_hosts', 'num_hosts_done',
'num_hosts_not_run', 'num_hosts_running', 'overall_status',
'period_beginning', 'period_ending', 'total_errors',
'total_instances']
}
get_instance_usage_audit_log = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instance_usage_audit_log': common_instance_usage_audit_log
},
'required': ['instance_usage_audit_log']
}
}
list_instance_usage_audit_log = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instance_usage_audit_logs': common_instance_usage_audit_log
},
'required': ['instance_usage_audit_logs']
}
}


@@ -1,72 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.response.compute import parameter_types
interface_common_info = {
'type': 'object',
'properties': {
'port_state': {'type': 'string'},
'fixed_ips': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'subnet_id': {
'type': 'string',
'format': 'uuid'
},
'ip_address': {
'type': 'string',
'format': 'ipv4'
}
},
'required': ['subnet_id', 'ip_address']
}
},
'port_id': {'type': 'string', 'format': 'uuid'},
'net_id': {'type': 'string', 'format': 'uuid'},
'mac_addr': parameter_types.mac_address
},
'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
}
get_create_interfaces = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'interfaceAttachment': interface_common_info
},
'required': ['interfaceAttachment']
}
}
list_interfaces = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'interfaceAttachments': {
'type': 'array',
'items': interface_common_info
}
},
'required': ['interfaceAttachments']
}
}
delete_interface = {
'status_code': [202]
}


@@ -1,100 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_keypair = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'public_key': {'type': 'string'},
'name': {'type': 'string'},
'fingerprint': {'type': 'string'},
'user_id': {'type': 'string'},
'deleted': {'type': 'boolean'},
'created_at': {'type': 'string'},
'updated_at': {'type': ['string', 'null']},
'deleted_at': {'type': ['string', 'null']},
'id': {'type': 'integer'}
},
# When we run the get keypair API, response body includes
# all the above mentioned attributes.
# But in Nova API sample file, response body includes only
# 'public_key', 'name' & 'fingerprint'. So only 'public_key',
# 'name' & 'fingerprint' are defined as 'required'.
'required': ['public_key', 'name', 'fingerprint']
}
},
'required': ['keypair']
}
}
create_keypair = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'fingerprint': {'type': 'string'},
'name': {'type': 'string'},
'public_key': {'type': 'string'},
'user_id': {'type': 'string'},
'private_key': {'type': 'string'}
},
                # When the create keypair API is called with a 'public_key'
                # (importing a keypair), the response body does not contain
                # 'private_key', so it is not defined as 'required'.
'required': ['fingerprint', 'name', 'public_key', 'user_id']
}
},
'required': ['keypair']
}
}
delete_keypair = {
'status_code': [202],
}
list_keypairs = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypairs': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'public_key': {'type': 'string'},
'name': {'type': 'string'},
'fingerprint': {'type': 'string'}
},
'required': ['public_key', 'name', 'fingerprint']
}
},
'required': ['keypair']
}
}
},
'required': ['keypairs']
}
}

View File

@ -1,101 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_limit = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'limits': {
'type': 'object',
'properties': {
'absolute': {
'type': 'object',
'properties': {
'maxTotalRAMSize': {'type': 'integer'},
'totalCoresUsed': {'type': 'integer'},
'maxTotalInstances': {'type': 'integer'},
'maxTotalFloatingIps': {'type': 'integer'},
'totalSecurityGroupsUsed': {'type': 'integer'},
'maxTotalCores': {'type': 'integer'},
'totalFloatingIpsUsed': {'type': 'integer'},
'maxSecurityGroups': {'type': 'integer'},
'maxServerMeta': {'type': 'integer'},
'maxPersonality': {'type': 'integer'},
'maxImageMeta': {'type': 'integer'},
'maxPersonalitySize': {'type': 'integer'},
'maxSecurityGroupRules': {'type': 'integer'},
'maxTotalKeypairs': {'type': 'integer'},
'totalRAMUsed': {'type': 'integer'},
'totalInstancesUsed': {'type': 'integer'},
'maxServerGroupMembers': {'type': 'integer'},
'maxServerGroups': {'type': 'integer'},
'totalServerGroupsUsed': {'type': 'integer'}
},
# NOTE(gmann): maxServerGroupMembers, maxServerGroups
# and totalServerGroupsUsed are API extension,
# and some environments return a response without these
                    # attributes. So they are not 'required'.
'required': ['maxImageMeta',
'maxPersonality',
'maxPersonalitySize',
'maxSecurityGroupRules',
'maxSecurityGroups',
'maxServerMeta',
'maxTotalCores',
'maxTotalFloatingIps',
'maxTotalInstances',
'maxTotalKeypairs',
'maxTotalRAMSize',
'totalCoresUsed',
'totalFloatingIpsUsed',
'totalInstancesUsed',
'totalRAMUsed',
'totalSecurityGroupsUsed']
},
'rate': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'limit': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'next-available':
{'type': 'string'},
'remaining':
{'type': 'integer'},
'unit':
{'type': 'string'},
'value':
{'type': 'integer'},
'verb':
{'type': 'string'}
}
}
},
'regex': {'type': 'string'},
'uri': {'type': 'string'}
}
}
}
},
'required': ['absolute', 'rate']
}
},
'required': ['limits']
}
}

View File

@ -1,31 +0,0 @@
# Copyright 2014 IBM Corporation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute.v2_1 import quotas
# NOTE(mriedem): os-quota-class-sets responses are the same as os-quota-sets
# except that the key in the response body is quota_class_set instead of
# quota_set, so update this copy of the schema from os-quota-sets.
get_quota_class_set = copy.deepcopy(quotas.get_quota_set)
get_quota_class_set['response_body']['properties']['quota_class_set'] = (
get_quota_class_set['response_body']['properties'].pop('quota_set'))
get_quota_class_set['response_body']['required'] = ['quota_class_set']
update_quota_class_set = copy.deepcopy(quotas.update_quota_set)
update_quota_class_set['response_body']['properties']['quota_class_set'] = (
update_quota_class_set['response_body']['properties'].pop('quota_set'))
update_quota_class_set['response_body']['required'] = ['quota_class_set']

View File

@ -1,63 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
update_quota_set = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'quota_set': {
'type': 'object',
'properties': {
'instances': {'type': 'integer'},
'cores': {'type': 'integer'},
'ram': {'type': 'integer'},
'floating_ips': {'type': 'integer'},
'fixed_ips': {'type': 'integer'},
'metadata_items': {'type': 'integer'},
'key_pairs': {'type': 'integer'},
'security_groups': {'type': 'integer'},
'security_group_rules': {'type': 'integer'},
'server_group_members': {'type': 'integer'},
'server_groups': {'type': 'integer'},
'injected_files': {'type': 'integer'},
'injected_file_content_bytes': {'type': 'integer'},
'injected_file_path_bytes': {'type': 'integer'}
},
# NOTE: server_group_members and server_groups are represented
# when enabling quota_server_group extension. So they should
# not be required.
'required': ['instances', 'cores', 'ram',
'floating_ips', 'fixed_ips',
'metadata_items', 'key_pairs',
'security_groups', 'security_group_rules',
'injected_files', 'injected_file_content_bytes',
'injected_file_path_bytes']
}
},
'required': ['quota_set']
}
}
get_quota_set = copy.deepcopy(update_quota_set)
get_quota_set['response_body']['properties']['quota_set']['properties'][
'id'] = {'type': 'string'}
get_quota_set['response_body']['properties']['quota_set']['required'].extend([
'id'])
delete_quota = {
'status_code': [202]
}

View File

@ -1,61 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_security_group_default_rule_info = {
'type': 'object',
'properties': {
'from_port': {'type': 'integer'},
'id': {'type': 'integer'},
'ip_protocol': {'type': 'string'},
'ip_range': {
'type': 'object',
'properties': {
'cidr': {'type': 'string'}
},
'required': ['cidr'],
},
'to_port': {'type': 'integer'},
},
'required': ['from_port', 'id', 'ip_protocol', 'ip_range', 'to_port'],
}
create_get_security_group_default_rule = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'security_group_default_rule':
common_security_group_default_rule_info
},
'required': ['security_group_default_rule']
}
}
delete_security_group_default_rule = {
'status_code': [204]
}
list_security_group_default_rules = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'security_group_default_rules': {
'type': 'array',
'items': common_security_group_default_rule_info
}
},
'required': ['security_group_default_rules']
}
}

View File

@ -1,105 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_security_group_rule = {
'from_port': {'type': ['integer', 'null']},
'to_port': {'type': ['integer', 'null']},
'group': {
'type': 'object',
'properties': {
'tenant_id': {'type': 'string'},
'name': {'type': 'string'}
}
},
'ip_protocol': {'type': ['string', 'null']},
    # 'parent_group_id' can be a UUID, so it is also defined as 'string'.
'parent_group_id': {'type': ['string', 'integer', 'null']},
'ip_range': {
'type': 'object',
'properties': {
'cidr': {'type': 'string'}
}
        # When an optional argument such as 'group_id' is provided in the
        # request body, the 'cidr' attribute does not come back in the
        # response body. So it is not 'required'.
},
'id': {'type': ['string', 'integer']}
}
common_security_group = {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'string']},
'name': {'type': 'string'},
'tenant_id': {'type': 'string'},
'rules': {
'type': 'array',
'items': {
'type': ['object', 'null'],
'properties': common_security_group_rule
}
},
'description': {'type': 'string'},
},
'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
}
list_security_groups = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'security_groups': {
'type': 'array',
'items': common_security_group
}
},
'required': ['security_groups']
}
}
get_security_group = create_security_group = update_security_group = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'security_group': common_security_group
},
'required': ['security_group']
}
}
delete_security_group = {
'status_code': [202]
}
create_security_group_rule = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'security_group_rule': {
'type': 'object',
'properties': common_security_group_rule,
'required': ['from_port', 'to_port', 'group', 'ip_protocol',
'parent_group_id', 'id', 'ip_range']
}
},
'required': ['security_group_rule']
}
}
delete_security_group_rule = {
'status_code': [202]
}

View File

@ -1,520 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute import parameter_types
create_server = {
'status_code': [202],
'response_body': {
'type': 'object',
'properties': {
'server': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'security_groups': {'type': 'array'},
'links': parameter_types.links,
'OS-DCF:diskConfig': {'type': 'string'}
},
# NOTE: OS-DCF:diskConfig & security_groups are API extension,
# and some environments return a response without these
                # attributes. So they are not 'required'.
'required': ['id', 'links']
}
},
'required': ['server']
}
}
create_server_with_admin_pass = copy.deepcopy(create_server)
create_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
create_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
list_servers = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links,
'name': {'type': 'string'}
},
'required': ['id', 'links', 'name']
}
},
'servers_links': parameter_types.links
},
# NOTE(gmann): servers_links attribute is not necessary to be
        # present always. So it is not 'required'.
'required': ['servers']
}
}
delete_server = {
'status_code': [204],
}
common_show_server = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
'image': {'oneOf': [
{'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']},
{'type': ['string', 'null']}
]},
'flavor': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
},
'fault': {
'type': 'object',
'properties': {
'code': {'type': 'integer'},
'created': {'type': 'string'},
'message': {'type': 'string'},
'details': {'type': 'string'},
},
# NOTE(gmann): 'details' is not necessary to be present
# in the 'fault'. So it is not defined as 'required'.
'required': ['code', 'created', 'message']
},
'user_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
'created': {'type': 'string'},
'updated': {'type': 'string'},
'progress': {'type': 'integer'},
'metadata': {'type': 'object'},
'links': parameter_types.links,
'addresses': parameter_types.addresses,
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
'accessIPv4': parameter_types.access_ip_v4,
'accessIPv6': parameter_types.access_ip_v6
},
# NOTE(GMann): 'progress' attribute is present in the response
# only when server's status is one of the progress statuses
# ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
# 'fault' attribute is present in the response
# only when server's status is one of the "ERROR", "DELETED".
# OS-DCF:diskConfig and accessIPv4/v6 are API
# extensions, and some environments return a response
    # without these attributes. So these are not defined as 'required'.
'required': ['id', 'name', 'status', 'image', 'flavor',
'user_id', 'tenant_id', 'created', 'updated',
'metadata', 'links', 'addresses', 'hostId']
}
update_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': common_show_server
},
'required': ['server']
}
}
server_detail = copy.deepcopy(common_show_server)
server_detail['properties'].update({
'key_name': {'type': ['string', 'null']},
'security_groups': {'type': 'array'},
# NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
# attributes.
'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
'OS-EXT-AZ:availability_zone': {'type': 'string'},
# NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
# attributes.
'OS-EXT-STS:task_state': {'type': ['string', 'null']},
'OS-EXT-STS:vm_state': {'type': 'string'},
'OS-EXT-STS:power_state': {'type': 'integer'},
'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
'os-extended-volumes:volumes_attached': {'type': 'array'},
'config_drive': {'type': 'string'}
})
server_detail['properties']['addresses']['patternProperties'][
'^[a-zA-Z0-9-_.]+$']['items']['properties'].update({
'OS-EXT-IPS:type': {'type': 'string'},
'OS-EXT-IPS-MAC:mac_addr': parameter_types.mac_address})
# NOTE(gmann): Update OS-EXT-IPS:type and OS-EXT-IPS-MAC:mac_addr
# attributes in server address. Those are API extension,
# and some environments return a response without
# these attributes. So they are not 'required'.
get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': server_detail
},
'required': ['server']
}
}
list_servers_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': server_detail
},
'servers_links': parameter_types.links
},
# NOTE(gmann): servers_links attribute is not necessary to be
        # present always. So it is not 'required'.
'required': ['servers']
}
}
rebuild_server = copy.deepcopy(update_server)
rebuild_server['status_code'] = [202]
rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
rescue_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'adminPass': {'type': 'string'}
},
'required': ['adminPass']
}
}
evacuate_server = copy.deepcopy(rescue_server)
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'virtual_interfaces': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'mac_address': parameter_types.mac_address,
'OS-EXT-VIF-NET:net_id': {'type': 'string'}
},
                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it is
# not defined as 'required'
'required': ['id', 'mac_address']
}
}
},
'required': ['virtual_interfaces']
}
}
common_attach_volume_info = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
'serverId': {'type': ['integer', 'string']}
},
'required': ['id', 'device', 'volumeId', 'serverId']
}
attach_volume = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachment': common_attach_volume_info
},
'required': ['volumeAttachment']
}
}
detach_volume = {
'status_code': [202]
}
get_volume_attachment = copy.deepcopy(attach_volume)
get_volume_attachment['response_body']['properties'][
'volumeAttachment']['properties'].update({'serverId': {'type': 'string'}})
list_volume_attachments = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachments': {
'type': 'array',
'items': common_attach_volume_info
}
},
'required': ['volumeAttachments']
}
}
list_volume_attachments['response_body']['properties'][
'volumeAttachments']['items']['properties'].update(
{'serverId': {'type': 'string'}})
list_addresses_by_network = {
'status_code': [200],
'response_body': parameter_types.addresses
}
list_addresses = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'addresses': parameter_types.addresses
},
'required': ['addresses']
}
}
common_server_group = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'policies': {
'type': 'array',
'items': {'type': 'string'}
},
        # The 'members' attribute contains the UUIDs of the instances
        # present in the server group.
'members': {
'type': 'array',
'items': {'type': 'string'}
},
'metadata': {'type': 'object'}
},
'required': ['id', 'name', 'policies', 'members', 'metadata']
}
create_get_server_group = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_group': common_server_group
},
'required': ['server_group']
}
}
delete_server_group = {
'status_code': [204]
}
list_server_groups = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_groups': {
'type': 'array',
'items': common_server_group
}
},
'required': ['server_groups']
}
}
instance_actions = {
'type': 'object',
'properties': {
'action': {'type': 'string'},
'request_id': {'type': 'string'},
'user_id': {'type': 'string'},
'project_id': {'type': 'string'},
'start_time': {'type': 'string'},
'message': {'type': ['string', 'null']},
'instance_uuid': {'type': 'string'}
},
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message', 'instance_uuid']
}
instance_action_events = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'event': {'type': 'string'},
'start_time': {'type': 'string'},
'finish_time': {'type': 'string'},
'result': {'type': 'string'},
'traceback': {'type': ['string', 'null']}
},
'required': ['event', 'start_time', 'finish_time', 'result',
'traceback']
}
}
list_instance_actions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceActions': {
'type': 'array',
'items': instance_actions
}
},
'required': ['instanceActions']
}
}
instance_actions_with_events = copy.deepcopy(instance_actions)
instance_actions_with_events['properties'].update({
'events': instance_action_events})
# 'events' does not come in response body always so it is not
# defined as 'required'
get_instance_action = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceAction': instance_actions_with_events
},
'required': ['instanceAction']
}
}
get_password = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'password': {'type': 'string'}
},
'required': ['password']
}
}
get_vnc_console = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'console': {
'type': 'object',
'properties': {
'type': {'type': 'string'},
'url': {
'type': 'string',
'format': 'uri'
}
},
'required': ['type', 'url']
}
},
'required': ['console']
}
}
get_console_output = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'output': {'type': 'string'}
},
'required': ['output']
}
}
set_server_metadata = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'metadata': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'required': ['metadata']
}
}
list_server_metadata = copy.deepcopy(set_server_metadata)
update_server_metadata = copy.deepcopy(set_server_metadata)
delete_server_metadata_item = {
'status_code': [204]
}
set_get_server_metadata_item = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'meta': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'required': ['meta']
}
}
server_actions_common_schema = {
'status_code': [202]
}
server_actions_delete_password = {
'status_code': [204]
}
server_actions_confirm_resize = copy.deepcopy(
server_actions_delete_password)
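# The copy.deepcopy-and-update pattern above (see create_server_with_admin_pass
# or rebuild_server) is how variant responses are derived from a base schema.
# Illustrative sketch with a hypothetical extension attribute 'OS-EXT-FOO:bar':
get_server_with_foo = copy.deepcopy(get_server)
get_server_with_foo['response_body']['properties']['server'][
    'properties'].update({'OS-EXT-FOO:bar': {'type': 'string'}})
get_server_with_foo['response_body']['properties']['server'][
    'required'].append('OS-EXT-FOO:bar')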

View File

@ -1,50 +0,0 @@
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
param_network = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'cidr': {'type': ['string', 'null']},
'label': {'type': 'string'}
},
'required': ['id', 'cidr', 'label']
}
list_tenant_networks = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'networks': {
'type': 'array',
'items': param_network
}
},
'required': ['networks']
}
}
get_tenant_network = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'network': param_network
},
'required': ['network']
}
}

View File

@ -1,92 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
_server_usages = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'ended_at': {
'oneOf': [
{'type': 'string'},
{'type': 'null'}
]
},
'flavor': {'type': 'string'},
'hours': {'type': 'number'},
'instance_id': {'type': 'string'},
'local_gb': {'type': 'integer'},
'memory_mb': {'type': 'integer'},
'name': {'type': 'string'},
'started_at': {'type': 'string'},
'state': {'type': 'string'},
'tenant_id': {'type': 'string'},
'uptime': {'type': 'integer'},
'vcpus': {'type': 'integer'},
},
'required': ['ended_at', 'flavor', 'hours', 'instance_id', 'local_gb',
'memory_mb', 'name', 'started_at', 'state', 'tenant_id',
'uptime', 'vcpus']
}
}
_tenant_usage_list = {
'type': 'object',
'properties': {
'server_usages': _server_usages,
'start': {'type': 'string'},
'stop': {'type': 'string'},
'tenant_id': {'type': 'string'},
'total_hours': {'type': 'number'},
'total_local_gb_usage': {'type': 'number'},
'total_memory_mb_usage': {'type': 'number'},
'total_vcpus_usage': {'type': 'number'},
},
'required': ['start', 'stop', 'tenant_id',
'total_hours', 'total_local_gb_usage',
'total_memory_mb_usage', 'total_vcpus_usage']
}
# 'required' of get_tenant is different from list_tenant's.
_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
_tenant_usage_get['required'] = ['server_usages', 'start', 'stop', 'tenant_id',
'total_hours', 'total_local_gb_usage',
'total_memory_mb_usage', 'total_vcpus_usage']
list_tenant = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'tenant_usages': {
'type': 'array',
'items': _tenant_usage_list
}
},
'required': ['tenant_usages']
}
}
get_tenant = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'tenant_usage': _tenant_usage_get
},
'required': ['tenant_usage']
}
}

View File

@ -1,114 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
create_get_volume = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volume': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'status': {'type': 'string'},
'displayName': {'type': ['string', 'null']},
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
'attachments': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
'serverId': {'type': 'string'}
}
                            # NOTE: If the volume is not attached to any
                            # server, the 'attachments' attribute comes back
                            # as an array with an empty object "[{}]"; because
                            # of that, its elements cannot be defined as
                            # 'required'. If it came back as an empty array
                            # "[]", they could be defined as 'required'.
}
}
},
'required': ['id', 'status', 'displayName', 'availabilityZone',
'createdAt', 'displayDescription', 'volumeType',
'snapshotId', 'metadata', 'size', 'attachments']
}
},
'required': ['volume']
}
}
list_volumes = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumes': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'status': {'type': 'string'},
'displayName': {'type': ['string', 'null']},
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
'attachments': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
'serverId': {'type': 'string'}
}
                                # NOTE: If the volume is not attached to any
                                # server, the 'attachments' attribute comes
                                # back as an array with an empty object
                                # "[{}]"; because of that, its elements cannot
                                # be defined as 'required'. If it came back as
                                # an empty array "[]", they could be 'required'.
}
}
},
'required': ['id', 'status', 'displayName',
'availabilityZone', 'createdAt',
'displayDescription', 'volumeType',
'snapshotId', 'metadata', 'size',
'attachments']
}
}
},
'required': ['volumes']
}
}
delete_volume = {
'status_code': [202]
}

View File

@ -1,59 +0,0 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
version = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'version': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {'type': 'string', 'format': 'uri'},
'rel': {'type': 'string'},
'type': {'type': 'string'}
},
'required': ['href', 'rel']
}
},
'media-types': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'base': {'type': 'string'},
'type': {'type': 'string'}
},
'required': ['base', 'type']
}
},
'status': {'type': 'string'},
'updated': {'type': 'string', 'format': 'date-time'},
'version': {'type': 'string'},
'min_version': {'type': 'string'}
},
# NOTE: version and min_version have been added since Kilo,
# so they should not be required.
'required': ['id', 'links', 'media-types', 'status', 'updated']
}
},
'required': ['version']
}
}

View File

@ -1,239 +0,0 @@
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
list_link = {
'type': 'object',
'properties': {
'rel': {'type': 'string'},
'href': {
'type': 'string',
'format': 'uri'
}
},
'required': ['href', 'rel']
}
list_queue = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'href': {
'type': 'string',
'format': 'uri'
},
'metadata': {'type': 'object'}
},
'required': ['name', 'href']
}
list_queues = {
'status_code': [200, 204],
'response_body': {
'type': 'object',
'properties': {
'links': {
'type': 'array',
'items': list_link,
'maxItems': 1
},
'queues': {
'type': 'array',
'items': list_queue
}
},
'required': ['links', 'queues']
}
}
age = {
'type': 'number',
'minimum': 0
}
message_link = {
'type': 'object',
'properties': {
'href': {
'type': 'string',
'format': 'uri'
},
'age': age,
'created': {
'type': 'string',
'format': 'date-time'
}
},
'required': ['href', 'age', 'created']
}
messages = {
'type': 'object',
'properties': {
'free': {'type': 'number'},
'claimed': {'type': 'number'},
'total': {'type': 'number'},
'oldest': message_link,
'newest': message_link
},
'required': ['free', 'claimed', 'total']
}
queue_stats = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'messages': messages
},
'required': ['messages']
}
}
resource_schema = {
'type': 'array',
'items': {
'type': 'string'
},
'minItems': 1
}
post_messages = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'resources': resource_schema,
            'partial': {'type': 'boolean'}
        },
        'required': ['resources', 'partial']
    }
}
message_ttl = {
'type': 'number',
'minimum': 1
}
list_messages_links = {
'type': 'array',
'maxItems': 1,
'minItems': 1,
'items': {
'type': 'object',
'properties': {
'rel': {'type': 'string'},
'href': {'type': 'string'}
},
'required': ['rel', 'href']
}
}
list_messages_response = {
'type': 'array',
'minItems': 1,
'items': {
'type': 'object',
'properties': {
'href': {'type': 'string'},
'ttl': message_ttl,
'age': age,
'body': {'type': 'object'}
},
'required': ['href', 'ttl', 'age', 'body']
}
}
list_messages = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': list_messages_links,
            'messages': list_messages_response
        },
        'required': ['links', 'messages']
    }
}
single_message = {
'type': 'object',
'properties': {
'href': {'type': 'string'},
'ttl': message_ttl,
'age': age,
'body': {'type': 'object'}
},
'required': ['href', 'ttl', 'age', 'body']
}
get_single_message = {
'status_code': [200],
'response_body': single_message
}
get_multiple_messages = {
'status_code': [200],
'response_body': {
'type': 'array',
'items': single_message,
'minItems': 1
}
}
messages_claimed = {
'type': 'object',
'properties': {
'href': {
'type': 'string',
'format': 'uri'
},
'ttl': message_ttl,
'age': {'type': 'number'},
'body': {'type': 'object'}
},
'required': ['href', 'ttl', 'age', 'body']
}
claim_messages = {
'status_code': [201, 204],
'response_body': {
'type': 'array',
'items': messages_claimed,
'minItems': 1
}
}
claim_ttl = {
'type': 'number',
'minimum': 1
}
query_claim = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'age': {'type': 'number'},
'ttl': claim_ttl,
'messages': {
'type': 'array',
'minItems': 1
}
},
'required': ['ttl', 'age', 'messages']
}
}
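# Illustrative sketch, assuming the jsonschema library: a body carrying only
# the required message counters satisfies queue_stats above, since the
# 'oldest' and 'newest' links are optional.
import jsonschema
sample_stats = {'messages': {'free': 146929, 'claimed': 2409,
                             'total': 149338}}
jsonschema.validate(sample_stats, queue_stats['response_body'])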

View File

@ -1,659 +0,0 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import datetime
import exceptions
import re
import urlparse
from oslo_log import log as logging
import six
from tempest.services.identity.v2.json import token_client as json_v2id
from tempest.services.identity.v3.json import token_client as json_v3id
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class AuthProvider(object):
"""
Provide authentication
"""
def __init__(self, credentials):
"""
:param credentials: credentials for authentication
"""
if self.check_credentials(credentials):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
self.cache = None
self.alt_auth_data = None
self.alt_part = None
def __str__(self):
return "Creds :{creds}, cached auth data: {cache}".format(
creds=self.credentials, cache=self.cache)
@abc.abstractmethod
def _decorate_request(self, filters, method, url, headers=None, body=None,
auth_data=None):
"""
Decorate request with authentication data
"""
return
@abc.abstractmethod
def _get_auth(self):
return
@abc.abstractmethod
def _fill_credentials(self, auth_data_body):
return
def fill_credentials(self):
"""
Fill credentials object with data from auth
"""
auth_data = self.get_auth()
self._fill_credentials(auth_data[1])
return self.credentials
@classmethod
def check_credentials(cls, credentials):
"""
Verify credentials are valid.
"""
return isinstance(credentials, Credentials) and credentials.is_valid()
@property
def auth_data(self):
return self.get_auth()
@auth_data.deleter
def auth_data(self):
self.clear_auth()
def get_auth(self):
"""
Returns auth from cache if available, else auth first
"""
if self.cache is None or self.is_expired(self.cache):
self.set_auth()
return self.cache
def set_auth(self):
"""
Forces setting auth, ignores cache if it exists.
Refills credentials
"""
self.cache = self._get_auth()
self._fill_credentials(self.cache[1])
def clear_auth(self):
"""
Can be called to clear the access cache so that next request
will fetch a new token and base_url.
"""
self.cache = None
self.credentials.reset()
@abc.abstractmethod
def is_expired(self, auth_data):
return
def auth_request(self, method, url, headers=None, body=None, filters=None):
"""
Obtains auth data and decorates a request with that.
:param method: HTTP method of the request
:param url: relative URL of the request (path)
:param headers: HTTP headers of the request
:param body: HTTP body in case of POST / PUT
:param filters: select a base URL out of the catalog
        :returns: a tuple (url, headers, body)
"""
orig_req = dict(url=url, headers=headers, body=body)
auth_url, auth_headers, auth_body = self._decorate_request(
filters, method, url, headers, body)
auth_req = dict(url=auth_url, headers=auth_headers, body=auth_body)
        # Overwrite part of the request if it has been requested
if self.alt_part is not None:
if self.alt_auth_data is not None:
alt_url, alt_headers, alt_body = self._decorate_request(
filters, method, url, headers, body,
auth_data=self.alt_auth_data)
alt_auth_req = dict(url=alt_url, headers=alt_headers,
body=alt_body)
auth_req[self.alt_part] = alt_auth_req[self.alt_part]
else:
# If alt auth data is None, skip auth in the requested part
auth_req[self.alt_part] = orig_req[self.alt_part]
# Next auth request will be normal, unless otherwise requested
self.reset_alt_auth_data()
return auth_req['url'], auth_req['headers'], auth_req['body']
def reset_alt_auth_data(self):
"""
Configure auth provider to provide valid authentication data
"""
self.alt_part = None
self.alt_auth_data = None
def set_alt_auth_data(self, request_part, auth_data):
"""
Configure auth provider to provide alt authentication data
on a part of the *next* auth_request. If credentials are None,
set invalid data.
:param request_part: request part to contain invalid auth: url,
headers, body
:param auth_data: alternative auth_data from which to get the
invalid data to be injected
"""
self.alt_part = request_part
self.alt_auth_data = auth_data
@abc.abstractmethod
def base_url(self, filters, auth_data=None):
"""
Extracts the base_url based on provided filters
"""
return
class KeystoneAuthProvider(AuthProvider):
token_expiry_threshold = datetime.timedelta(seconds=60)
def __init__(self, credentials, auth_url,
disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
super(KeystoneAuthProvider, self).__init__(credentials)
self.dsvm = disable_ssl_certificate_validation
self.ca_certs = ca_certs
self.trace_requests = trace_requests
self.auth_client = self._auth_client(auth_url)
def _decorate_request(self, filters, method, url, headers=None, body=None,
auth_data=None):
if auth_data is None:
auth_data = self.auth_data
token, _ = auth_data
base_url = self.base_url(filters=filters, auth_data=auth_data)
# build authenticated request
# returns new request, it does not touch the original values
_headers = copy.deepcopy(headers) if headers is not None else {}
_headers['X-Auth-Token'] = str(token)
if url is None or url == "":
_url = base_url
else:
# Join base URL and url, and remove multiple contiguous slashes
_url = "/".join([base_url, url])
parts = [x for x in urlparse.urlparse(_url)]
parts[2] = re.sub("/{2,}", "/", parts[2])
_url = urlparse.urlunparse(parts)
# no change to method or body
return str(_url), _headers, body
@abc.abstractmethod
def _auth_client(self):
return
@abc.abstractmethod
def _auth_params(self):
return
def _get_auth(self):
# Bypasses the cache
auth_func = getattr(self.auth_client, 'get_token')
auth_params = self._auth_params()
# returns token, auth_data
token, auth_data = auth_func(**auth_params)
return token, auth_data
def get_token(self):
return self.auth_data[0]
class KeystoneV2AuthProvider(KeystoneAuthProvider):
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def _auth_client(self, auth_url):
return json_v2id.TokenClientJSON(
auth_url, disable_ssl_certificate_validation=self.dsvm,
ca_certs=self.ca_certs, trace_requests=self.trace_requests)
def _auth_params(self):
return dict(
user=self.credentials.username,
password=self.credentials.password,
tenant=self.credentials.tenant_name,
auth_data=True)
def _fill_credentials(self, auth_data_body):
tenant = auth_data_body['token']['tenant']
user = auth_data_body['user']
if self.credentials.tenant_name is None:
self.credentials.tenant_name = tenant['name']
if self.credentials.tenant_id is None:
self.credentials.tenant_id = tenant['id']
if self.credentials.username is None:
self.credentials.username = user['name']
if self.credentials.user_id is None:
self.credentials.user_id = user['id']
def base_url(self, filters, auth_data=None):
"""
Filters can be:
- service: compute, image, etc
- region: the service region
- endpoint_type: adminURL, publicURL, internalURL
- api_version: replace catalog version with this
- skip_path: take just the base URL
"""
if auth_data is None:
auth_data = self.auth_data
token, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
endpoint_type = filters.get('endpoint_type', 'publicURL')
if service is None:
raise exceptions.EndpointNotFound("No service provided")
_base_url = None
for ep in _auth_data['serviceCatalog']:
if ep["type"] == service:
for _ep in ep['endpoints']:
if region is not None and _ep['region'] == region:
_base_url = _ep.get(endpoint_type)
if not _base_url:
# No region matching, use the first
_base_url = ep['endpoints'][0].get(endpoint_type)
break
if _base_url is None:
raise exceptions.EndpointNotFound(service)
parts = urlparse.urlparse(_base_url)
if filters.get('api_version', None) is not None:
path = "/" + filters['api_version']
noversion_path = "/".join(parts.path.split("/")[2:])
if noversion_path != "":
path += "/" + noversion_path
_base_url = _base_url.replace(parts.path, path)
if filters.get('skip_path', None) is not None and parts.path != '':
_base_url = _base_url.replace(parts.path, "/")
return _base_url
def is_expired(self, auth_data):
_, access = auth_data
expiry = datetime.datetime.strptime(access['token']['expires'],
self.EXPIRY_DATE_FORMAT)
return expiry - self.token_expiry_threshold <= \
datetime.datetime.utcnow()
class KeystoneV3AuthProvider(KeystoneAuthProvider):
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def _auth_client(self, auth_url):
return json_v3id.V3TokenClientJSON(
auth_url, disable_ssl_certificate_validation=self.dsvm,
ca_certs=self.ca_certs, trace_requests=self.trace_requests)
def _auth_params(self):
return dict(
user_id=self.credentials.user_id,
username=self.credentials.username,
password=self.credentials.password,
project_id=self.credentials.project_id,
project_name=self.credentials.project_name,
user_domain_id=self.credentials.user_domain_id,
user_domain_name=self.credentials.user_domain_name,
project_domain_id=self.credentials.project_domain_id,
project_domain_name=self.credentials.project_domain_name,
domain_id=self.credentials.domain_id,
domain_name=self.credentials.domain_name,
auth_data=True)
def _fill_credentials(self, auth_data_body):
# project or domain, depending on the scope
project = auth_data_body.get('project', None)
domain = auth_data_body.get('domain', None)
# user is always there
user = auth_data_body['user']
# Set project fields
if project is not None:
if self.credentials.project_name is None:
self.credentials.project_name = project['name']
if self.credentials.project_id is None:
self.credentials.project_id = project['id']
if self.credentials.project_domain_id is None:
self.credentials.project_domain_id = project['domain']['id']
if self.credentials.project_domain_name is None:
self.credentials.project_domain_name = \
project['domain']['name']
# Set domain fields
if domain is not None:
if self.credentials.domain_id is None:
self.credentials.domain_id = domain['id']
if self.credentials.domain_name is None:
self.credentials.domain_name = domain['name']
# Set user fields
if self.credentials.username is None:
self.credentials.username = user['name']
if self.credentials.user_id is None:
self.credentials.user_id = user['id']
if self.credentials.user_domain_id is None:
self.credentials.user_domain_id = user['domain']['id']
if self.credentials.user_domain_name is None:
self.credentials.user_domain_name = user['domain']['name']
def base_url(self, filters, auth_data=None):
"""
Filters can be:
- service: compute, image, etc
- region: the service region
- endpoint_type: adminURL, publicURL, internalURL
- api_version: replace catalog version with this
- skip_path: take just the base URL
"""
if auth_data is None:
auth_data = self.auth_data
token, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
endpoint_type = filters.get('endpoint_type', 'public')
if service is None:
raise exceptions.EndpointNotFound("No service provided")
if 'URL' in endpoint_type:
endpoint_type = endpoint_type.replace('URL', '')
_base_url = None
catalog = _auth_data['catalog']
# Select entries with matching service type
service_catalog = [ep for ep in catalog if ep['type'] == service]
if len(service_catalog) > 0:
service_catalog = service_catalog[0]['endpoints']
else:
# No matching service
raise exceptions.EndpointNotFound(service)
# Filter by endpoint type (interface)
filtered_catalog = [ep for ep in service_catalog if
ep['interface'] == endpoint_type]
if len(filtered_catalog) == 0:
# No matching type, keep all and try matching by region at least
filtered_catalog = service_catalog
# Filter by region
filtered_catalog = [ep for ep in filtered_catalog if
ep['region'] == region]
if len(filtered_catalog) == 0:
# No matching region, take the first endpoint
filtered_catalog = [service_catalog[0]]
# There should be only one match. If not take the first.
_base_url = filtered_catalog[0].get('url', None)
if _base_url is None:
raise exceptions.EndpointNotFound(service)
parts = urlparse.urlparse(_base_url)
if filters.get('api_version', None) is not None:
path = "/" + filters['api_version']
noversion_path = "/".join(parts.path.split("/")[2:])
if noversion_path != "":
path += "/" + noversion_path
_base_url = _base_url.replace(parts.path, path)
if filters.get('skip_path', None) is not None:
_base_url = _base_url.replace(parts.path, "/")
return _base_url
def is_expired(self, auth_data):
_, access = auth_data
expiry = datetime.datetime.strptime(access['expires_at'],
self.EXPIRY_DATE_FORMAT)
return expiry - self.token_expiry_threshold <= \
datetime.datetime.utcnow()
def is_identity_version_supported(identity_version):
return identity_version in IDENTITY_VERSION
def get_credentials(auth_url, fill_in=True, identity_version='v2',
disable_ssl_certificate_validation=None, ca_certs=None,
trace_requests=None, **kwargs):
"""
Builds a credentials object based on the configured auth_version
    :param auth_url (string): Full URI of the OpenStack Identity API (Keystone)
which is used to fetch the token from Identity service.
:param fill_in (boolean): obtain a token and fill in all credential
details provided by the identity service. When fill_in is not
specified, credentials are not validated. Validation can be invoked
        by calling ``is_valid()``
:param identity_version (string): identity API version is used to
select the matching auth provider and credentials class
:param disable_ssl_certificate_validation: whether to enforce SSL
certificate validation in SSL API requests to the auth system
:param ca_certs: CA certificate bundle for validation of certificates
in SSL API requests to the auth system
:param trace_requests: trace in log API requests to the auth system
:param kwargs (dict): Dict of credential key/value pairs
Examples:
Returns credentials from the provided parameters:
>>> get_credentials(username='foo', password='bar')
Returns credentials including IDs:
>>> get_credentials(username='foo', password='bar', fill_in=True)
"""
if not is_identity_version_supported(identity_version):
raise exceptions.InvalidIdentityVersion(
identity_version=identity_version)
credential_class, auth_provider_class = IDENTITY_VERSION.get(
identity_version)
creds = credential_class(**kwargs)
# Fill in the credentials fields that were not specified
if fill_in:
dsvm = disable_ssl_certificate_validation
auth_provider = auth_provider_class(
creds, auth_url, disable_ssl_certificate_validation=dsvm,
ca_certs=ca_certs, trace_requests=trace_requests)
creds = auth_provider.fill_credentials()
return creds
class Credentials(object):
"""
Set of credentials for accessing OpenStack services
ATTRIBUTES: list of valid class attributes representing credentials.
"""
ATTRIBUTES = []
def __init__(self, **kwargs):
"""
Enforce the available attributes at init time (only).
Additional attributes can still be set afterwards if tests need
to do so.
"""
self._initial = kwargs
self._apply_credentials(kwargs)
def _apply_credentials(self, attr):
for key in attr.keys():
if key in self.ATTRIBUTES:
setattr(self, key, attr[key])
else:
raise exceptions.InvalidCredentials
def __str__(self):
"""
Represent only attributes included in self.ATTRIBUTES
"""
_repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
return str(_repr)
def __eq__(self, other):
"""
Credentials are equal if attributes in self.ATTRIBUTES are equal
"""
return str(self) == str(other)
def __getattr__(self, key):
# If an attribute is set, __getattr__ is not invoked
# If an attribute is not set, and it is a known one, return None
if key in self.ATTRIBUTES:
return None
else:
raise AttributeError
def __delitem__(self, key):
# For backwards compatibility, support dict behaviour
if key in self.ATTRIBUTES:
delattr(self, key)
else:
raise AttributeError
def get(self, item, default):
# In this patch act as dict for backward compatibility
try:
return getattr(self, item)
except AttributeError:
return default
def get_init_attributes(self):
return self._initial.keys()
def is_valid(self):
raise NotImplementedError
def reset(self):
# First delete all known attributes
for key in self.ATTRIBUTES:
if getattr(self, key) is not None:
delattr(self, key)
# Then re-apply initial setup
self._apply_credentials(self._initial)
class KeystoneV2Credentials(Credentials):
ATTRIBUTES = ['username', 'password', 'tenant_name', 'user_id',
'tenant_id']
def is_valid(self):
"""
        The minimum set of valid credentials is username and password.
Tenant is optional.
"""
return None not in (self.username, self.password)
class KeystoneV3Credentials(Credentials):
"""
Credentials suitable for the Keystone Identity V3 API
"""
ATTRIBUTES = ['domain_id', 'domain_name', 'password', 'username',
'project_domain_id', 'project_domain_name', 'project_id',
'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
'user_domain_name', 'user_id']
def __setattr__(self, key, value):
parent = super(KeystoneV3Credentials, self)
# for tenant_* set both project and tenant
if key == 'tenant_id':
parent.__setattr__('project_id', value)
elif key == 'tenant_name':
parent.__setattr__('project_name', value)
# for project_* set both project and tenant
if key == 'project_id':
parent.__setattr__('tenant_id', value)
elif key == 'project_name':
parent.__setattr__('tenant_name', value)
# for *_domain_* set both user and project if not set yet
if key == 'user_domain_id':
if self.project_domain_id is None:
parent.__setattr__('project_domain_id', value)
if key == 'project_domain_id':
if self.user_domain_id is None:
parent.__setattr__('user_domain_id', value)
if key == 'user_domain_name':
if self.project_domain_name is None:
parent.__setattr__('project_domain_name', value)
if key == 'project_domain_name':
if self.user_domain_name is None:
parent.__setattr__('user_domain_name', value)
# support domain_name coming from config
if key == 'domain_name':
parent.__setattr__('user_domain_name', value)
parent.__setattr__('project_domain_name', value)
# finally trigger default behaviour for all attributes
parent.__setattr__(key, value)
def is_valid(self):
"""
Valid combinations of v3 credentials (excluding token, scope)
- User id, password (optional domain)
- User name, password and its domain id/name
For the scope, valid combinations are:
- None
- Project id (optional domain)
- Project name and its domain id/name
- Domain id
- Domain name
"""
valid_user_domain = any(
[self.user_domain_id is not None,
self.user_domain_name is not None])
valid_project_domain = any(
[self.project_domain_id is not None,
self.project_domain_name is not None])
valid_user = any(
[self.user_id is not None,
self.username is not None and valid_user_domain])
valid_project_scope = any(
[self.project_name is None and self.project_id is None,
self.project_id is not None,
self.project_name is not None and valid_project_domain])
valid_domain_scope = any(
[self.domain_id is None and self.domain_name is None,
self.domain_id or self.domain_name])
return all([self.password is not None,
valid_user,
valid_project_scope and valid_domain_scope])
IDENTITY_VERSION = {'v2': (KeystoneV2Credentials, KeystoneV2AuthProvider),
'v3': (KeystoneV3Credentials, KeystoneV3AuthProvider)}
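# Illustrative usage sketch: the auth URL and credential values below are
# placeholders, and a reachable Keystone v2 endpoint is assumed.
# get_credentials builds the credentials object; the matching provider then
# decorates a request with a token and the service base URL.
_example_creds = get_credentials('http://keystone.example.com:5000/v2.0',
                                 fill_in=False, identity_version='v2',
                                 username='demo', password='secret',
                                 tenant_name='demo')
_example_provider = KeystoneV2AuthProvider(
    _example_creds, 'http://keystone.example.com:5000/v2.0')
# Returns (url, headers, body) with X-Auth-Token set and the URL resolved
# against the 'compute' entry of the service catalog.
_url, _headers, _body = _example_provider.auth_request(
    'GET', 'servers/detail', filters={'service': 'compute'})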

View File

@ -1,418 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from tempest.common import cred_provider
from tempest.common import negative_rest_client
from tempest import config
from tempest import manager
from tempest.services.baremetal.v1.json.baremetal_client import \
BaremetalClientJSON
from tempest.services import botoclients
from tempest.services.compute.json.agents_client import \
AgentsClientJSON
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
from tempest.services.compute.json.availability_zone_client import \
AvailabilityZoneClientJSON
from tempest.services.compute.json.baremetal_nodes_client import \
BaremetalNodesClientJSON
from tempest.services.compute.json.certificates_client import \
CertificatesClientJSON
from tempest.services.compute.json.extensions_client import \
ExtensionsClientJSON
from tempest.services.compute.json.fixed_ips_client import FixedIPsClientJSON
from tempest.services.compute.json.flavors_client import FlavorsClientJSON
from tempest.services.compute.json.floating_ips_client import \
FloatingIPsClientJSON
from tempest.services.compute.json.hosts_client import HostsClientJSON
from tempest.services.compute.json.hypervisor_client import \
HypervisorClientJSON
from tempest.services.compute.json.images_client import ImagesClientJSON
from tempest.services.compute.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClientJSON
from tempest.services.compute.json.interfaces_client import \
InterfacesClientJSON
from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
from tempest.services.compute.json.limits_client import LimitsClientJSON
from tempest.services.compute.json.migrations_client import \
MigrationsClientJSON
from tempest.services.compute.json.networks_client import NetworksClientJSON
from tempest.services.compute.json.quotas_client import QuotaClassesClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
from tempest.services.compute.json.security_group_default_rules_client import \
SecurityGroupDefaultRulesClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
from tempest.services.compute.json.services_client import ServicesClientJSON
from tempest.services.compute.json.tenant_networks_client import \
TenantNetworksClientJSON
from tempest.services.compute.json.tenant_usages_client import \
TenantUsagesClientJSON
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClientJSON
from tempest.services.data_processing.v1_1.data_processing_client import \
DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClientJSON
from tempest.services.database.json.limits_client import \
DatabaseLimitsClientJSON
from tempest.services.database.json.versions_client import \
DatabaseVersionsClientJSON
from tempest.services.identity.v2.json.identity_client import \
IdentityClientJSON
from tempest.services.identity.v2.json.token_client import TokenClientJSON
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClientJSON
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClientJSON
from tempest.services.identity.v3.json.identity_client import \
IdentityV3ClientJSON
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
from tempest.services.identity.v3.json.region_client import RegionClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from tempest.services.identity.v3.json.token_client import V3TokenClientJSON
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
from tempest.services.messaging.json.messaging_client import \
MessagingClientJSON
from tempest.services.network.json.network_client import NetworkClientJSON
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClientJSON
from tempest.services.volume.json.admin.volume_hosts_client import \
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClientJSON
from tempest.services.volume.json.admin.volume_services_client import \
VolumesServicesClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.availability_zone_client import \
VolumeAvailabilityZoneClientJSON
from tempest.services.volume.json.backups_client import BackupsClientJSON
from tempest.services.volume.json.extensions_client import \
ExtensionsClientJSON as VolumeExtensionClientJSON
from tempest.services.volume.json.qos_client import QosSpecsClientJSON
from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
from tempest.services.volume.json.volumes_client import VolumesClientJSON
from tempest.services.volume.v2.json.admin.volume_hosts_client import \
VolumeHostsV2ClientJSON
from tempest.services.volume.v2.json.admin.volume_quotas_client import \
VolumeQuotasV2Client
from tempest.services.volume.v2.json.admin.volume_services_client import \
VolumesServicesV2ClientJSON
from tempest.services.volume.v2.json.admin.volume_types_client import \
VolumeTypesV2ClientJSON
from tempest.services.volume.v2.json.availability_zone_client import \
VolumeV2AvailabilityZoneClientJSON
from tempest.services.volume.v2.json.backups_client import BackupsClientV2JSON
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
from tempest.services.volume.v2.json.snapshots_client import \
SnapshotsV2ClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
"""
Top level manager for OpenStack tempest clients
"""
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
# NOTE: Tempest uses timeout values of compute API if project specific
# timeout values don't exist.
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
def __init__(self, credentials=None, service=None):
super(Manager, self).__init__(credentials=credentials)
self._set_compute_clients()
self._set_database_clients()
self._set_identity_clients()
self._set_volume_clients()
self._set_object_storage_clients()
self.baremetal_client = BaremetalClientJSON(
self.auth_provider,
CONF.baremetal.catalog_type,
CONF.identity.region,
endpoint_type=CONF.baremetal.endpoint_type,
**self.default_params_with_timeout_values)
self.network_client = NetworkClientJSON(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
self.messaging_client = MessagingClientJSON(
self.auth_provider,
CONF.messaging.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientJSON(
self.auth_provider,
CONF.telemetry.catalog_type,
CONF.identity.region,
endpoint_type=CONF.telemetry.endpoint_type,
**self.default_params_with_timeout_values)
if CONF.service_available.glance:
self.image_client = ImageClientJSON(
self.auth_provider,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type,
build_interval=CONF.image.build_interval,
build_timeout=CONF.image.build_timeout,
**self.default_params)
self.image_client_v2 = ImageClientV2JSON(
self.auth_provider,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type,
build_interval=CONF.image.build_interval,
build_timeout=CONF.image.build_timeout,
**self.default_params)
self.data_processing_client = DataProcessingClient(
self.auth_provider,
CONF.data_processing.catalog_type,
CONF.identity.region,
endpoint_type=CONF.data_processing.endpoint_type,
**self.default_params_with_timeout_values)
self.negative_client = negative_rest_client.NegativeRestClient(
self.auth_provider, service, **self.default_params)
# Generating EC2 credentials in tempest is only supported
# with identity v2
if CONF.identity_feature_enabled.api_v2 and \
CONF.identity.auth_version == 'v2':
# EC2 and S3 clients, if used, will check configured AWS credentials
# and generate new ones if needed
self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
self.s3_client = botoclients.ObjectClientS3(self.identity_client)
def _set_compute_clients(self):
params = {
'service': CONF.compute.catalog_type,
'region': CONF.compute.region or CONF.identity.region,
'endpoint_type': CONF.compute.endpoint_type,
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
params.update(self.default_params)
self.agents_client = AgentsClientJSON(self.auth_provider, **params)
self.networks_client = NetworksClientJSON(self.auth_provider, **params)
self.migrations_client = MigrationsClientJSON(self.auth_provider,
**params)
self.security_group_default_rules_client = (
SecurityGroupDefaultRulesClientJSON(self.auth_provider, **params))
self.certificates_client = CertificatesClientJSON(self.auth_provider,
**params)
self.servers_client = ServersClientJSON(
self.auth_provider,
enable_instance_password=CONF.compute_feature_enabled
.enable_instance_password,
**params)
self.limits_client = LimitsClientJSON(self.auth_provider, **params)
self.images_client = ImagesClientJSON(self.auth_provider, **params)
self.keypairs_client = KeyPairsClientJSON(self.auth_provider, **params)
self.quotas_client = QuotasClientJSON(self.auth_provider, **params)
self.quota_classes_client = QuotaClassesClientJSON(self.auth_provider,
**params)
self.flavors_client = FlavorsClientJSON(self.auth_provider, **params)
self.extensions_client = ExtensionsClientJSON(self.auth_provider,
**params)
self.floating_ips_client = FloatingIPsClientJSON(self.auth_provider,
**params)
self.security_groups_client = SecurityGroupsClientJSON(
self.auth_provider, **params)
self.interfaces_client = InterfacesClientJSON(self.auth_provider,
**params)
self.fixed_ips_client = FixedIPsClientJSON(self.auth_provider,
**params)
self.availability_zone_client = AvailabilityZoneClientJSON(
self.auth_provider, **params)
self.aggregates_client = AggregatesClientJSON(self.auth_provider,
**params)
self.services_client = ServicesClientJSON(self.auth_provider, **params)
self.tenant_usages_client = TenantUsagesClientJSON(self.auth_provider,
**params)
self.hosts_client = HostsClientJSON(self.auth_provider, **params)
self.hypervisor_client = HypervisorClientJSON(self.auth_provider,
**params)
self.instance_usages_audit_log_client = \
InstanceUsagesAuditLogClientJSON(self.auth_provider, **params)
self.tenant_networks_client = \
TenantNetworksClientJSON(self.auth_provider, **params)
self.baremetal_nodes_client = BaremetalNodesClientJSON(
self.auth_provider, **params)
# NOTE: The following client needs special timeout values because
# the API is a proxy for the other component.
params_volume = copy.deepcopy(params)
params_volume.update({
'build_interval': CONF.volume.build_interval,
'build_timeout': CONF.volume.build_timeout
})
self.volumes_extensions_client = VolumesExtensionsClientJSON(
self.auth_provider, default_volume_size=CONF.volume.volume_size,
**params_volume)
def _set_database_clients(self):
self.database_flavors_client = DatabaseFlavorsClientJSON(
self.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
self.database_limits_client = DatabaseLimitsClientJSON(
self.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
self.database_versions_client = DatabaseVersionsClientJSON(
self.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
**self.default_params_with_timeout_values)
def _set_identity_clients(self):
params = {
'service': CONF.identity.catalog_type,
'region': CONF.identity.region,
'endpoint_type': 'adminURL'
}
params.update(self.default_params_with_timeout_values)
self.identity_client = IdentityClientJSON(self.auth_provider,
**params)
self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider,
**params)
self.endpoints_client = EndPointClientJSON(self.auth_provider,
**params)
self.service_client = ServiceClientJSON(self.auth_provider, **params)
self.policy_client = PolicyClientJSON(self.auth_provider, **params)
self.region_client = RegionClientJSON(self.auth_provider, **params)
self.credentials_client = CredentialsClientJSON(self.auth_provider,
**params)
# Token clients do not use the catalog. They only need default_params.
self.token_client = TokenClientJSON(CONF.identity.uri,
**self.default_params)
if CONF.identity_feature_enabled.api_v3:
self.token_v3_client = V3TokenClientJSON(CONF.identity.uri_v3,
**self.default_params)
def _set_volume_clients(self):
params = {
'service': CONF.volume.catalog_type,
'region': CONF.volume.region or CONF.identity.region,
'endpoint_type': CONF.volume.endpoint_type,
'build_interval': CONF.volume.build_interval,
'build_timeout': CONF.volume.build_timeout
}
params.update(self.default_params)
self.volume_qos_client = QosSpecsClientJSON(self.auth_provider,
**params)
self.volume_qos_v2_client = QosSpecsV2ClientJSON(
self.auth_provider, **params)
self.volume_services_v2_client = VolumesServicesV2ClientJSON(
self.auth_provider, **params)
self.backups_client = BackupsClientJSON(self.auth_provider, **params)
self.backups_v2_client = BackupsClientV2JSON(self.auth_provider,
**params)
self.snapshots_client = SnapshotsClientJSON(self.auth_provider,
**params)
self.snapshots_v2_client = SnapshotsV2ClientJSON(self.auth_provider,
**params)
self.volumes_client = VolumesClientJSON(
self.auth_provider, default_volume_size=CONF.volume.volume_size,
**params)
self.volumes_v2_client = VolumesV2ClientJSON(
self.auth_provider, default_volume_size=CONF.volume.volume_size,
**params)
self.volume_types_client = VolumeTypesClientJSON(self.auth_provider,
**params)
self.volume_services_client = VolumesServicesClientJSON(
self.auth_provider, **params)
self.volume_hosts_client = VolumeHostsClientJSON(self.auth_provider,
**params)
self.volume_hosts_v2_client = VolumeHostsV2ClientJSON(
self.auth_provider, **params)
self.volume_quotas_client = VolumeQuotasClientJSON(self.auth_provider,
**params)
self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider,
**params)
self.volumes_extension_client = VolumeExtensionClientJSON(
self.auth_provider, **params)
self.volumes_v2_extension_client = VolumeV2ExtensionClientJSON(
self.auth_provider, **params)
self.volume_availability_zone_client = \
VolumeAvailabilityZoneClientJSON(self.auth_provider, **params)
self.volume_v2_availability_zone_client = \
VolumeV2AvailabilityZoneClientJSON(self.auth_provider, **params)
self.volume_types_v2_client = VolumeTypesV2ClientJSON(
self.auth_provider, **params)
def _set_object_storage_clients(self):
params = {
'service': CONF.object_storage.catalog_type,
'region': CONF.object_storage.region or CONF.identity.region,
'endpoint_type': CONF.object_storage.endpoint_type
}
params.update(self.default_params_with_timeout_values)
self.account_client = AccountClient(self.auth_provider, **params)
self.container_client = ContainerClient(self.auth_provider, **params)
self.object_client = ObjectClient(self.auth_provider, **params)
class AdminManager(Manager):
"""
Manager object that uses the admin credentials for its
managed client objects
"""
def __init__(self, service=None):
super(AdminManager, self).__init__(
credentials=cred_provider.get_configured_credentials(
'identity_admin'),
service=service)
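# Illustrative usage sketch of the Manager defined above; it assumes a
# populated tempest.conf and reachable service endpoints, so it will not run
# standalone:
example_creds = cred_provider.get_configured_credentials('user')
example_mgr = Manager(credentials=example_creds)
example_flavors = example_mgr.flavors_client.list_flavors()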

View File

@ -1,349 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
from oslo_concurrency import lockutils
from oslo_log import log as logging
import yaml
from tempest import clients
from tempest.common import cred_provider
from tempest.common import fixed_network
from tempest import config
from tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
def read_accounts_yaml(path):
yaml_file = open(path, 'r')
accounts = yaml.load(yaml_file)
return accounts
class Accounts(cred_provider.CredentialProvider):
def __init__(self, identity_version=None, name=None):
super(Accounts, self).__init__(identity_version=identity_version,
name=name)
if (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
self.use_default_creds = False
else:
accounts = {}
self.use_default_creds = True
self.hash_dict = self.get_hash_dict(accounts)
self.accounts_dir = os.path.join(lockutils.get_lock_path(CONF),
'test_accounts')
self.isolated_creds = {}
@classmethod
def _append_role(cls, role, account_hash, hash_dict):
if role in hash_dict['roles']:
hash_dict['roles'][role].append(account_hash)
else:
hash_dict['roles'][role] = [account_hash]
return hash_dict
@classmethod
def get_hash_dict(cls, accounts):
hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
# Loop over the accounts read from the yaml file
for account in accounts:
roles = []
types = []
resources = []
if 'roles' in account:
roles = account.pop('roles')
if 'types' in account:
types = account.pop('types')
if 'resources' in account:
resources = account.pop('resources')
temp_hash = hashlib.md5()
temp_hash.update(str(account))
temp_hash_key = temp_hash.hexdigest()
hash_dict['creds'][temp_hash_key] = account
for role in roles:
hash_dict = cls._append_role(role, temp_hash_key,
hash_dict)
# If types are set for the account, append the hash to the matching
# role subdict
for type in types:
if type == 'admin':
hash_dict = cls._append_role(CONF.identity.admin_role,
temp_hash_key, hash_dict)
elif type == 'operator':
hash_dict = cls._append_role(
CONF.object_storage.operator_role, temp_hash_key,
hash_dict)
elif type == 'reseller_admin':
hash_dict = cls._append_role(
CONF.object_storage.reseller_admin_role,
temp_hash_key,
hash_dict)
# Populate the network subdict
for resource in resources:
if resource == 'network':
hash_dict['networks'][temp_hash_key] = resources[resource]
else:
LOG.warning('Unknown resource type %s, ignoring this field'
% resource)
return hash_dict
def is_multi_user(self):
# Default credentials is not a valid option with locking Account
if self.use_default_creds:
raise exceptions.InvalidConfiguration(
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
else:
return len(self.hash_dict['creds']) > 1
def is_multi_tenant(self):
return self.is_multi_user()
def _create_hash_file(self, hash_string):
path = os.path.join(os.path.join(self.accounts_dir, hash_string))
if not os.path.isfile(path):
with open(path, 'w') as fd:
fd.write(self.name)
return True
return False
@lockutils.synchronized('test_accounts_io', external=True)
def _get_free_hash(self, hashes):
# Cast as a list because in some edge cases a set will be passed in
hashes = list(hashes)
if not os.path.isdir(self.accounts_dir):
os.mkdir(self.accounts_dir)
# Create File from first hash (since none are in use)
self._create_hash_file(hashes[0])
return hashes[0]
names = []
for _hash in hashes:
res = self._create_hash_file(_hash)
if res:
return _hash
else:
path = os.path.join(os.path.join(self.accounts_dir,
_hash))
with open(path, 'r') as fd:
names.append(fd.read())
msg = ('Insufficient number of users provided. %s have allocated all '
'the credentials for this allocation request' % ','.join(names))
raise exceptions.InvalidConfiguration(msg)
def _get_match_hash_list(self, roles=None):
hashes = []
if roles:
# Loop over all the creds for each role in the subdict and generate
# a list of cred lists for each role
for role in roles:
temp_hashes = self.hash_dict['roles'].get(role, None)
if not temp_hashes:
raise exceptions.InvalidConfiguration(
"No credentials with role: %s specified in the "
"accounts ""file" % role)
hashes.append(temp_hashes)
# Take the list of lists and do a boolean and between each list to
# find the creds which fall under all the specified roles
temp_list = set(hashes[0])
for hash_list in hashes[1:]:
temp_list = temp_list & set(hash_list)
hashes = temp_list
else:
hashes = self.hash_dict['creds'].keys()
# NOTE(mtreinish): admin is a special case because of the increased
# privilege set which could potentially cause issues on tests where that
# is not expected. So unless the admin role is explicitly requested, do
# not allocate admin.
admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
None)
if ((not roles or CONF.identity.admin_role not in roles) and
admin_hashes):
useable_hashes = [x for x in hashes if x not in admin_hashes]
else:
useable_hashes = hashes
return useable_hashes
def _sanitize_creds(self, creds):
temp_creds = creds.copy()
temp_creds.pop('password')
return temp_creds
def _get_creds(self, roles=None):
if self.use_default_creds:
raise exceptions.InvalidConfiguration(
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
useable_hashes = self._get_match_hash_list(roles)
free_hash = self._get_free_hash(useable_hashes)
clean_creds = self._sanitize_creds(
self.hash_dict['creds'][free_hash])
LOG.info('%s allocated creds:\n%s' % (self.name, clean_creds))
return self._wrap_creds_with_network(free_hash)
@lockutils.synchronized('test_accounts_io', external=True)
def remove_hash(self, hash_string):
hash_path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(hash_path):
LOG.warning('Expected an account lock file %s to remove, but '
'one did not exist' % hash_path)
else:
os.remove(hash_path)
if not os.listdir(self.accounts_dir):
os.rmdir(self.accounts_dir)
def get_hash(self, creds):
for _hash in self.hash_dict['creds']:
# Comparing on the attributes that are expected in the YAML
init_attributes = creds.get_init_attributes()
hash_attributes = self.hash_dict['creds'][_hash].copy()
if ('user_domain_name' in init_attributes and 'user_domain_name'
not in hash_attributes):
# Allow for the case of domain_name populated from config
domain_name = CONF.identity.admin_domain_name
hash_attributes['user_domain_name'] = domain_name
if all([getattr(creds, k) == hash_attributes[k] for
k in init_attributes]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
def remove_credentials(self, creds):
_hash = self.get_hash(creds)
clean_creds = self._sanitize_creds(self.hash_dict['creds'][_hash])
self.remove_hash(_hash)
LOG.info("%s returned allocated creds:\n%s" % (self.name, clean_creds))
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
net_creds = self._get_creds()
self.isolated_creds['primary'] = net_creds
return net_creds
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
net_creds = self._get_creds()
self.isolated_creds['alt'] = net_creds
return net_creds
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
exist_creds = self.isolated_creds.get(str(roles), None)
# The force kwarg is used to allocate an additional set of creds with
# the same role list. The index used for the previous allocation
# in the isolated_creds dict will be moved.
if exist_creds and not force_new:
return exist_creds
elif exist_creds and force_new:
new_index = str(roles) + '-' + str(len(self.isolated_creds))
self.isolated_creds[new_index] = exist_creds
net_creds = self._get_creds(roles=roles)
self.isolated_creds[str(roles)] = net_creds
return net_creds
def clear_isolated_creds(self):
for creds in self.isolated_creds.values():
self.remove_credentials(creds)
def get_admin_creds(self):
return self.get_creds_by_roles([CONF.identity.admin_role])
def is_role_available(self, role):
if self.use_default_creds:
return False
else:
if self.hash_dict['roles'].get(role):
return True
return False
def admin_available(self):
return self.is_role_available(CONF.identity.admin_role)
def _wrap_creds_with_network(self, hash):
creds_dict = self.hash_dict['creds'][hash]
credential = cred_provider.get_credentials(
identity_version=self.identity_version, **creds_dict)
net_creds = cred_provider.TestResources(credential)
net_clients = clients.Manager(credentials=credential)
compute_network_client = net_clients.networks_client
net_name = self.hash_dict['networks'].get(hash, None)
network = fixed_network.get_network_from_name(
net_name, compute_network_client)
net_creds.set_resources(network=network)
return net_creds
class NotLockingAccounts(Accounts):
"""Credentials provider which always returns the first and second
configured accounts as primary and alt users.
This credential provider can be used in case of serial test execution
to preserve the current behaviour of the serial tempest run.
"""
def _unique_creds(self, cred_arg=None):
"""Verify that the configured credentials are valid and distinct """
try:
user = self.get_primary_creds()
alt_user = self.get_alt_creds()
return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
except exceptions.InvalidCredentials as ic:
msg = "At least one of the configured credentials is " \
"not valid: %s" % ic.message
raise exceptions.InvalidConfiguration(msg)
def is_multi_user(self):
return self._unique_creds('username')
def is_multi_tenant(self):
return self._unique_creds('tenant_id')
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
primary_credential = cred_provider.get_configured_credentials(
credential_type='user', identity_version=self.identity_version)
self.isolated_creds['primary'] = cred_provider.TestResources(
primary_credential)
return self.isolated_creds['primary']
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
alt_credential = cred_provider.get_configured_credentials(
credential_type='alt_user',
identity_version=self.identity_version)
self.isolated_creds['alt'] = cred_provider.TestResources(
alt_credential)
return self.isolated_creds['alt']
def clear_isolated_creds(self):
self.isolated_creds = {}
def get_admin_creds(self):
creds = cred_provider.get_configured_credentials(
"identity_admin", fill_in=False)
self.isolated_creds['admin'] = cred_provider.TestResources(creds)
return self.isolated_creds['admin']
def get_creds_by_roles(self, roles, force_new=False):
msg = "Credentials being specified through the config file can not be"\
" used with tests that specify using credentials by roles. "\
"Either exclude/skip the tests doing this or use either an "\
"test_accounts_file or tenant isolation."
raise exceptions.InvalidConfiguration(msg)
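# Standalone sketch of the hashing step performed by Accounts.get_hash_dict()
# above: credential fields are hashed only after 'roles'/'types'/'resources'
# are popped (the account values here are made up; Python 2 semantics,
# matching the md5 call above):
import hashlib

example_account = {'username': 'user1', 'tenant_name': 'tenant1',
                   'password': 'pass', 'roles': ['Member'],
                   'resources': {'network': 'private-net'}}
example_roles = example_account.pop('roles')
example_network = example_account.pop('resources')['network']
example_key = hashlib.md5(str(example_account)).hexdigest()
example_hash_dict = {'creds': {example_key: example_account},
                     'roles': dict((r, [example_key]) for r in example_roles),
                     'networks': {example_key: example_network}}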

View File

@ -1,39 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import subprocess
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def copy_file_to_host(file_from, dest, host, username, pkey):
dest = "%s@%s:%s" % (username, host, dest)
cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
"-o StrictHostKeyChecking=no " \
"-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
'file1': file_from,
'dest': dest}
args = shlex.split(cmd.encode('utf-8'))
subprocess_args = {'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT}
proc = subprocess.Popen(args, **subprocess_args)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
LOG.error(("Command {0} returned with exit status {1},"
"output {2}, error {3}").format(cmd, proc.returncode,
stdout, stderr))
return stdout
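# Hypothetical usage sketch; the host, user, key path and file names are made
# up, and a reachable host accepting that key is assumed:
if __name__ == '__main__':
    copy_file_to_host('/tmp/report.log', '/home/cirros/report.log',
                      '192.0.2.10', 'cirros', '/opt/keys/test_key.pem')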

View File

@ -1,173 +0,0 @@
# Copyright (c) 2014 Deutsche Telekom AG
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_log import log as logging
import six
from tempest import auth
from tempest import config
from tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Type of credentials available from configuration
CREDENTIAL_TYPES = {
'identity_admin': ('identity', 'admin'),
'user': ('identity', None),
'alt_user': ('identity', 'alt')
}
DEFAULT_PARAMS = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
# Reads credentials from configuration and builds a Credentials object
# based on the specified or configured version
def get_configured_credentials(credential_type, fill_in=True,
identity_version=None):
identity_version = identity_version or CONF.identity.auth_version
if identity_version not in ('v2', 'v3'):
raise exceptions.InvalidConfiguration(
'Unsupported auth version: %s' % identity_version)
if credential_type not in CREDENTIAL_TYPES:
raise exceptions.InvalidCredentials()
conf_attributes = ['username', 'password', 'tenant_name']
if identity_version == 'v3':
conf_attributes.append('domain_name')
# Read the parts of credentials from config
params = DEFAULT_PARAMS.copy()
section, prefix = CREDENTIAL_TYPES[credential_type]
for attr in conf_attributes:
_section = getattr(CONF, section)
if prefix is None:
params[attr] = getattr(_section, attr)
else:
params[attr] = getattr(_section, prefix + "_" + attr)
# Build and validate credentials. We are reading configured credentials,
# so validate them even if fill_in is False
credentials = get_credentials(fill_in=fill_in,
identity_version=identity_version, **params)
if not fill_in:
if not credentials.is_valid():
msg = ("The %s credentials are incorrectly set in the config file."
" Double check that all required values are assigned" %
credential_type)
raise exceptions.InvalidConfiguration(msg)
return credentials
# Wrapper around auth.get_credentials to use the configured identity version
# if none is specified
def get_credentials(fill_in=True, identity_version=None, **kwargs):
params = dict(DEFAULT_PARAMS, **kwargs)
identity_version = identity_version or CONF.identity.auth_version
# In case of "v3" add the domain from config if not specified
if identity_version == 'v3':
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
if 'domain' in x)
if not domain_fields.intersection(kwargs.keys()):
# TODO(andreaf) It might be better here to use a dedicated config
# option such as CONF.auth.tenant_isolation_domain_name
params['user_domain_name'] = CONF.identity.admin_domain_name
auth_url = CONF.identity.uri_v3
else:
auth_url = CONF.identity.uri
return auth.get_credentials(auth_url,
fill_in=fill_in,
identity_version=identity_version,
**params)
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
def __init__(self, identity_version=None, name=None, password='pass',
network_resources=None):
"""A CredentialProvider supplies credentials to test classes.
:param identity_version If specified it will return credentials of the
corresponding identity version, otherwise it
uses auth_version from configuration
:param name Name of the calling test. Included in provisioned
credentials when credentials are provisioned on the fly
:param password Used for provisioned credentials when credentials are
provisioned on the fly
:param network_resources Network resources required for the credentials
"""
# TODO(andreaf) name and password are tenant isolation specific, and
# could be removed from this abstract class
self.name = name or "test_creds"
self.identity_version = identity_version or CONF.identity.auth_version
if not auth.is_identity_version_supported(self.identity_version):
raise exceptions.InvalidIdentityVersion(
identity_version=self.identity_version)
@abc.abstractmethod
def get_primary_creds(self):
return
@abc.abstractmethod
def get_admin_creds(self):
return
@abc.abstractmethod
def get_alt_creds(self):
return
@abc.abstractmethod
def clear_isolated_creds(self):
return
@abc.abstractmethod
def is_multi_user(self):
return
@abc.abstractmethod
def is_multi_tenant(self):
return
@abc.abstractmethod
def get_creds_by_roles(self, roles, force_new=False):
return
@abc.abstractmethod
def is_role_available(self, role):
return
class TestResources(object):
"""Readonly Credentials, with network resources added."""
def __init__(self, credentials):
self._credentials = credentials
self.network = None
self.subnet = None
self.router = None
def __getattr__(self, item):
return getattr(self._credentials, item)
def set_resources(self, **kwargs):
for key in kwargs.keys():
if hasattr(self, key):
setattr(self, key, kwargs[key])
@property
def credentials(self):
return self._credentials
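# Illustrative sketch combining the helpers above; it assumes a populated
# tempest.conf (fill_in=False keeps it from contacting keystone), and the
# network dict is made up:
example_creds = get_configured_credentials('user', fill_in=False)
example_resources = TestResources(example_creds)
example_resources.set_resources(network={'id': 'net-uuid', 'name': 'private'})
print(example_resources.network)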

View File

@ -1,93 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tempest.common import accounts
from tempest.common import cred_provider
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
CONF = config.CONF
# Return the right implementation of CredentialProvider based on config
# Dropping interface and password, as they are never used anyways
# TODO(andreaf) Drop them from the CredentialsProvider interface completely
def get_isolated_credentials(name, network_resources=None,
force_tenant_isolation=False,
identity_version=None):
# If a test requires a new account to work, it can have it via forcing
# tenant isolation. A new account will be produced only for that test.
# In case admin credentials are not available for the account creation,
# the test should be skipped, otherwise it would fail.
if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
return isolated_creds.IsolatedCreds(
name=name,
network_resources=network_resources,
identity_version=identity_version)
else:
if (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
# Most params are not relevant for pre-created accounts
return accounts.Accounts(name=name,
identity_version=identity_version)
else:
return accounts.NotLockingAccounts(
name=name, identity_version=identity_version)
# We want a helper function here to check and see if admin credentials
# are available so we can do a single call from skip_checks if admin
# creds are available.
def is_admin_available():
is_admin = True
# If tenant isolation is enabled admin will be available
if CONF.auth.allow_tenant_isolation:
return is_admin
# Check whether test accounts file has the admin specified or not
elif (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
check_accounts = accounts.Accounts(name='check_admin')
if not check_accounts.admin_available():
is_admin = False
else:
try:
cred_provider.get_configured_credentials('identity_admin',
fill_in=False)
except exceptions.InvalidConfiguration:
is_admin = False
return is_admin
# We want a helper function here to check and see if alt credentials
# are available so we can do a single call from skip_checks if alt
# creds are available.
def is_alt_available():
# If tenant isolation is enabled alt credentials will be available
if CONF.auth.allow_tenant_isolation:
return True
# Check whether test accounts file has the admin specified or not
if (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
check_accounts = accounts.Accounts(name='check_alt')
else:
check_accounts = accounts.NotLockingAccounts(name='check_alt')
try:
if not check_accounts.is_multi_user():
return False
else:
return True
except exceptions.InvalidConfiguration:
return False
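# Illustrative sketch of how a test base class typically picks a provider
# through these helpers (the test name is made up; a configured environment
# is assumed):
example_provider = get_isolated_credentials(name='ExampleScenarioTest')
if is_admin_available():
    example_admin = example_provider.get_admin_creds()
example_primary = example_provider.get_primary_creds()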

View File

@ -1,226 +0,0 @@
# Copyright 2013 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from testtools import helpers
class ExistsAllResponseHeaders(object):
"""
Specific matcher to check the existence of Swift's response headers
This matcher checks the existence of common headers for each HTTP method
or the target, which means account, container or object.
When checking the existence of 'specific' headers such as
X-Account-Meta-* or X-Object-Manifest for example, those headers must be
checked in each test code.
"""
def __init__(self, target, method):
"""
param: target Account/Container/Object
param: method PUT/GET/HEAD/DELETE/COPY/POST
"""
self.target = target
self.method = method
def match(self, actual):
"""
param: actual HTTP response headers
"""
# Check common headers for all HTTP methods
if 'content-length' not in actual:
return NonExistentHeader('content-length')
if 'content-type' not in actual:
return NonExistentHeader('content-type')
if 'x-trans-id' not in actual:
return NonExistentHeader('x-trans-id')
if 'date' not in actual:
return NonExistentHeader('date')
# Check headers for a specific method or target
if self.method == 'GET' or self.method == 'HEAD':
if 'x-timestamp' not in actual:
return NonExistentHeader('x-timestamp')
if 'accept-ranges' not in actual:
return NonExistentHeader('accept-ranges')
if self.target == 'Account':
if 'x-account-bytes-used' not in actual:
return NonExistentHeader('x-account-bytes-used')
if 'x-account-container-count' not in actual:
return NonExistentHeader('x-account-container-count')
if 'x-account-object-count' not in actual:
return NonExistentHeader('x-account-object-count')
elif self.target == 'Container':
if 'x-container-bytes-used' not in actual:
return NonExistentHeader('x-container-bytes-used')
if 'x-container-object-count' not in actual:
return NonExistentHeader('x-container-object-count')
elif self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
if 'last-modified' not in actual:
return NonExistentHeader('last-modified')
elif self.method == 'PUT':
if self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
if 'last-modified' not in actual:
return NonExistentHeader('last-modified')
elif self.method == 'COPY':
if self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
if 'last-modified' not in actual:
return NonExistentHeader('last-modified')
if 'x-copied-from' not in actual:
return NonExistentHeader('x-copied-from')
if 'x-copied-from-last-modified' not in actual:
return NonExistentHeader('x-copied-from-last-modified')
return None
class NonExistentHeader(object):
"""
Provides an error message for end users when a certain header is
missing from Swift's responses
"""
def __init__(self, header):
self.header = header
def describe(self):
return "%s header does not exist" % self.header
def get_details(self):
return {}
class AreAllWellFormatted(object):
"""
Specific matcher to check the correctness of formats of values of Swift's
response headers
This matcher checks the format of values of response headers.
When checking the format of values of 'specific' headers such as
X-Account-Meta-* or X-Object-Manifest for example, those values must be
checked in each test code.
"""
def match(self, actual):
for key, value in actual.iteritems():
if key in ('content-length', 'x-account-bytes-used',
'x-account-container-count', 'x-account-object-count',
'x-container-bytes-used', 'x-container-object-count')\
and not value.isdigit():
return InvalidFormat(key, value)
elif key in ('content-type', 'date', 'last-modified',
'x-copied-from-last-modified') and not value:
return InvalidFormat(key, value)
elif key == 'x-timestamp' and not re.match("^\d+\.?\d*\Z", value):
return InvalidFormat(key, value)
elif key == 'x-copied-from' and not re.match("\S+/\S+", value):
return InvalidFormat(key, value)
elif key == 'x-trans-id' and \
not re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
return InvalidFormat(key, value)
elif key == 'accept-ranges' and not value == 'bytes':
return InvalidFormat(key, value)
elif key == 'etag' and not value.isalnum():
return InvalidFormat(key, value)
elif key == 'transfer-encoding' and not value == 'chunked':
return InvalidFormat(key, value)
return None
class InvalidFormat(object):
"""
Provides an error message for end users if the format of a certain
header is invalid
"""
def __init__(self, key, value):
self.key = key
self.value = value
def describe(self):
return "InvalidFormat (%s, %s)" % (self.key, self.value)
def get_details(self):
return {}
class MatchesDictExceptForKeys(object):
"""Matches two dictionaries. Verifies all items are equals except for those
identified by a list of keys.
"""
def __init__(self, expected, excluded_keys=None):
self.expected = expected
self.excluded_keys = excluded_keys if excluded_keys is not None else []
def match(self, actual):
filtered_expected = helpers.dict_subtract(self.expected,
self.excluded_keys)
filtered_actual = helpers.dict_subtract(actual,
self.excluded_keys)
if filtered_actual != filtered_expected:
return DictMismatch(filtered_expected, filtered_actual)
class DictMismatch(object):
"""Mismatch between two dicts describes deltas"""
def __init__(self, expected, actual):
self.expected = expected
self.actual = actual
self.intersect = set(self.expected) & set(self.actual)
self.symmetric_diff = set(self.expected) ^ set(self.actual)
def _format_dict(self, dict_to_format):
# Ensure the error string dict is printed in a set order
# NOTE(mtreinish): needed to ensure a deterministic error msg for
# testing. Otherwise the error message will be dependent on the
# dict ordering.
dict_string = "{"
for key in sorted(dict_to_format):
dict_string += "'%s': %s, " % (key, dict_to_format[key])
dict_string = dict_string[:-2] + '}'
return dict_string
def describe(self):
msg = ""
if self.symmetric_diff:
only_expected = helpers.dict_subtract(self.expected, self.actual)
only_actual = helpers.dict_subtract(self.actual, self.expected)
if only_expected:
msg += "Only in expected:\n %s\n" % self._format_dict(
only_expected)
if only_actual:
msg += "Only in actual:\n %s\n" % self._format_dict(
only_actual)
diff_set = set(o for o in self.intersect if
self.expected[o] != self.actual[o])
if diff_set:
msg += "Differences:\n"
for o in diff_set:
msg += " %s: expected %s, actual %s\n" % (
o, self.expected[o], self.actual[o])
return msg
def get_details(self):
return {}
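# Illustrative usage sketch with testtools; the header values below are made
# up but satisfy both matchers for an Object GET response:
import testtools

class HeaderMatcherExample(testtools.TestCase):
    def test_object_get_headers(self):
        headers = {'content-length': '11', 'content-type': 'text/plain',
                   'x-trans-id': 'tx' + 'f' * 21 + '-' + 'a' * 10,
                   'date': 'Mon, 01 Jan 2018 00:00:00 GMT',
                   'x-timestamp': '1514764800.0',
                   'accept-ranges': 'bytes',
                   'etag': 'd41d8cd98f00b204e9800998ecf8427e',
                   'last-modified': 'Mon, 01 Jan 2018 00:00:00 GMT'}
        self.assertThat(headers, ExistsAllResponseHeaders('Object', 'GET'))
        self.assertThat(headers, AreAllWellFormatted())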

View File

@ -1,130 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
from tempest import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
def get_network_from_name(name, compute_networks_client):
"""Get a full network dict from just a network name
:param str name: the name of the network to use
:param NetworksClientJSON compute_networks_client: The network client
object to use for making the network lists api request
:return: The full dictionary for the network in question, unless the
network for the supplied name cannot be found, in which case a dict
with just the name will be returned.
:rtype: dict
"""
caller = misc_utils.find_test_caller()
if not name:
network = {'name': name}
else:
try:
resp = compute_networks_client.list_networks(name=name)
if isinstance(resp, list):
networks = resp
elif isinstance(resp, dict):
networks = resp['networks']
else:
raise lib_exc.NotFound()
if len(networks) > 0:
network = networks[0]
else:
msg = "Network with name: %s not found" % name
if caller:
LOG.warning('(%s) %s' % (caller, msg))
else:
LOG.warning(msg)
raise lib_exc.NotFound()
# To be consistent with network isolation, set 'name' even if only
# 'label' is available
name = network.get('name', network.get('label'))
if name:
network['name'] = name
else:
raise lib_exc.NotFound()
except lib_exc.NotFound:
# In case of nova network, if the fixed_network_name is not
# owned by the tenant, and the network client is not an admin
# one, list_networks will not find it
msg = ('Unable to find network %s. '
'Starting instance without specifying a network.' %
name)
if caller:
LOG.info('(%s) %s' % (caller, msg))
else:
LOG.info(msg)
network = {'name': name}
return network
def get_tenant_network(creds_provider, compute_networks_client):
"""Get a network usable by the primary tenant
:param creds_provider: instance of credential provider
:param compute_networks_client: compute network client. We want to have the
compute network client so we can use a common approach for both
neutron and nova-network cases. If this is not an admin network
client, set_networks_kwarg might fail in case fixed_network_name
is the network to be used, and it's not visible to the tenant
:return a dict with 'id' and 'name' of the network
"""
caller = misc_utils.find_test_caller()
fixed_network_name = CONF.compute.fixed_network_name
net_creds = creds_provider.get_primary_creds()
network = getattr(net_creds, 'network', None)
if not network or not network.get('name'):
if fixed_network_name:
msg = ('No valid network provided or created, defaulting to '
'fixed_network_name')
if caller:
LOG.debug('(%s) %s' % (caller, msg))
else:
LOG.debug(msg)
network = get_network_from_name(fixed_network_name,
compute_networks_client)
msg = ('Found network %s available for tenant' % network)
if caller:
LOG.info('(%s) %s' % (caller, msg))
else:
LOG.info(msg)
return network
def set_networks_kwarg(network, kwargs=None):
"""Set 'networks' kwargs for a server create if missing
:param network: dict of network to be used with 'id' and 'name'
:param kwargs: server create kwargs to be enhanced
:return: new dict of kwargs updated to include networks
"""
params = copy.copy(kwargs) or {}
if kwargs and 'networks' in kwargs:
return params
if network:
if 'id' in network.keys():
params.update({"networks": [{'uuid': network['id']}]})
else:
LOG.warning('provided network dict: %s was invalid, did not '
' contain an id' % network)
return params
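# Small worked example of set_networks_kwarg(); the network dict is made up:
example_net = {'id': '1234-abcd', 'name': 'private'}
example_kwargs = set_networks_kwarg(example_net, {'name': 'vm-1'})
# example_kwargs is now {'name': 'vm-1', 'networks': [{'uuid': '1234-abcd'}]}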

View File

@ -1,182 +0,0 @@
# Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import jsonschema
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def _check_for_expected_result(name, schema):
expected_result = None
if "results" in schema:
if name in schema["results"]:
expected_result = schema["results"][name]
return expected_result
def generator_type(*args, **kwargs):
def wrapper(func):
func.types = args
for key in kwargs:
setattr(func, key, kwargs[key])
return func
return wrapper
def simple_generator(fn):
"""
Decorator for simple generators that return one value
"""
@functools.wraps(fn)
def wrapped(self, schema):
result = fn(self, schema)
if result is not None:
expected_result = _check_for_expected_result(fn.__name__, schema)
return (fn.__name__, result, expected_result)
return
return wrapped
class BasicGeneratorSet(object):
_instance = None
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"http-method": {
"enum": ["GET", "PUT", "HEAD",
"POST", "PATCH", "DELETE", 'COPY']
},
"admin_client": {"type": "boolean"},
"url": {"type": "string"},
"default_result_code": {"type": "integer"},
"json-schema": {},
"resources": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"name": {"type": "string"},
"expected_result": {"type": "integer"}
}
}
]
}
},
"results": {
"type": "object",
"properties": {}
}
},
"required": ["name", "http-method", "url"],
"additionalProperties": False,
}
def __init__(self):
self.types_dict = {}
for m in dir(self):
if callable(getattr(self, m)) and '__' not in m:
method = getattr(self, m)
if hasattr(method, "types"):
for type in method.types:
if type not in self.types_dict:
self.types_dict[type] = []
self.types_dict[type].append(method)
def validate_schema(self, schema):
if "json-schema" in schema:
jsonschema.Draft4Validator.check_schema(schema['json-schema'])
jsonschema.validate(schema, self.schema)
def generate_scenarios(self, schema, path=None):
"""
Generates the scenario (all possible test cases) out of the given
schema.
:param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
:param path: the schema path if the given schema is a subschema
"""
schema_type = schema['type']
scenarios = []
if schema_type == 'object':
properties = schema["properties"]
for attribute, definition in properties.iteritems():
current_path = copy.copy(path)
if path is not None:
current_path.append(attribute)
else:
current_path = [attribute]
scenarios.extend(
self.generate_scenarios(definition, current_path))
elif isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
for generator in self.types_dict[schema_type]:
if hasattr(generator, "needed_property"):
prop = generator.needed_property
if (prop not in schema or
schema[prop] is None or
schema[prop] is False):
continue
name = generator.__name__
if ("exclude_tests" in schema and
name in schema["exclude_tests"]):
continue
if path is not None:
name = "%s_%s" % ("_".join(path), name)
scenarios.append({
"_negtest_name": name,
"_negtest_generator": generator,
"_negtest_schema": schema,
"_negtest_path": path})
return scenarios
def generate_payload(self, test, schema):
"""
Generates one jsonschema out of the given test. It is mandatory to run
generate_scenarios beforehand to register all needed variables on the test.
:param test: A test object (scenario) with all _negtest variables on it
:param schema: schema for the test
"""
generator = test._negtest_generator
ret = generator(test._negtest_schema)
path = copy.copy(test._negtest_path)
expected_result = None
if ret is not None:
generator_result = generator(test._negtest_schema)
invalid_snippet = generator_result[1]
expected_result = generator_result[2]
element = path.pop()
if len(path) > 0:
schema_snip = reduce(dict.get, path, schema)
schema_snip[element] = invalid_snippet
else:
schema[element] = invalid_snippet
return expected_result
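# Illustrative test description that satisfies the meta-schema above (the
# resource name, URL and json-schema contents are made up):
example_schema = {
    "name": "get-fake-resource",
    "http-method": "GET",
    "url": "fake-resources/%s",
    "resources": [{"name": "fake-resource", "expected_result": 404}],
    "json-schema": {"type": "object",
                    "properties": {"size": {"type": "integer",
                                            "minimum": 1}}},
}
BasicGeneratorSet().validate_schema(example_schema)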

View File

@ -1,79 +0,0 @@
# Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
import tempest.common.generator.base_generator as base
import tempest.common.generator.valid_generator as valid
LOG = logging.getLogger(__name__)
class NegativeTestGenerator(base.BasicGeneratorSet):
@base.generator_type("string")
@base.simple_generator
def gen_int(self, _):
return 4
@base.generator_type("integer")
@base.simple_generator
def gen_string(self, _):
return "XXXXXX"
@base.generator_type("integer", "string")
def gen_none(self, schema):
# Note(mkoderer): it's not using the decorator otherwise it'd be
# filtered
expected_result = base._check_for_expected_result('gen_none', schema)
return ('gen_none', None, expected_result)
@base.generator_type("string")
@base.simple_generator
def gen_str_min_length(self, schema):
min_length = schema.get("minLength", 0)
if min_length > 0:
return "x" * (min_length - 1)
@base.generator_type("string", needed_property="maxLength")
@base.simple_generator
def gen_str_max_length(self, schema):
max_length = schema.get("maxLength", -1)
return "x" * (max_length + 1)
@base.generator_type("integer", needed_property="minimum")
@base.simple_generator
def gen_int_min(self, schema):
minimum = schema["minimum"]
if "exclusiveMinimum" not in schema:
minimum -= 1
return minimum
@base.generator_type("integer", needed_property="maximum")
@base.simple_generator
def gen_int_max(self, schema):
maximum = schema["maximum"]
if "exclusiveMaximum" not in schema:
maximum += 1
return maximum
@base.generator_type("object", needed_property="additionalProperties")
@base.simple_generator
def gen_obj_add_attr(self, schema):
valid_schema = valid.ValidTestGenerator().generate_valid(schema)
new_valid = copy.deepcopy(valid_schema)
new_valid["$$$$$$$$$$"] = "xxx"
return new_valid
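# Worked example of one generator above: with maxLength 4 the produced string
# is one character too long, so the API under test should reject it.
example = NegativeTestGenerator().gen_str_max_length({"type": "string",
                                                      "maxLength": 4})
# example == ('gen_str_max_length', 'xxxxx', None)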

View File

@ -1,82 +0,0 @@
# Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import tempest.common.generator.base_generator as base
LOG = logging.getLogger(__name__)
class ValidTestGenerator(base.BasicGeneratorSet):
@base.generator_type("string")
@base.simple_generator
def generate_valid_string(self, schema):
size = schema.get("minLength", 1)
# TODO(dkr mko): handle format and pattern
return "x" * size
@base.generator_type("integer")
@base.simple_generator
def generate_valid_integer(self, schema):
# TODO(dkr mko): handle multipleOf
if "minimum" in schema:
minimum = schema["minimum"]
if "exclusiveMinimum" not in schema:
return minimum
else:
return minimum + 1
if "maximum" in schema:
maximum = schema["maximum"]
if "exclusiveMaximum" not in schema:
return maximum
else:
return maximum - 1
return 0
@base.generator_type("object")
@base.simple_generator
def generate_valid_object(self, schema):
obj = {}
for k, v in schema["properties"].iteritems():
obj[k] = self.generate_valid(v)
return obj
def generate(self, schema):
schema_type = schema["type"]
if isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
result = []
if schema_type not in self.types_dict:
raise TypeError("generator (%s) doesn't support type: %s"
% (self.__class__.__name__, schema_type))
for generator in self.types_dict[schema_type]:
ret = generator(schema)
if ret is not None:
if isinstance(ret, list):
result.extend(ret)
elif isinstance(ret, tuple):
result.append(ret)
else:
raise Exception("generator (%s) returns invalid result: %s"
% (generator, ret))
return result
def generate_valid(self, schema):
return self.generate(schema)[0][1]
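# Worked examples of generate_valid(): minLength drives the string case and
# minimum/exclusiveMinimum the integer case.
example_gen = ValidTestGenerator()
assert example_gen.generate_valid({"type": "string", "minLength": 3}) == "xxx"
assert example_gen.generate_valid({"type": "integer", "minimum": 10}) == 10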

View File

@ -1,376 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Originally copied from python-glanceclient
import copy
import hashlib
import json
import posixpath
import re
import socket
import struct
import OpenSSL
from oslo_log import log as logging
import six
from six import moves
from six.moves import http_client as httplib
from six.moves.urllib import parse as urlparse
from tempest_lib import exceptions as lib_exc
from tempest import exceptions as exc
LOG = logging.getLogger(__name__)
USER_AGENT = 'tempest'
CHUNKSIZE = 1024 * 64 # 64kB
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
class HTTPClient(object):
def __init__(self, auth_provider, filters, **kwargs):
self.auth_provider = auth_provider
self.filters = filters
self.endpoint = auth_provider.base_url(filters)
endpoint_parts = urlparse.urlparse(self.endpoint)
self.endpoint_scheme = endpoint_parts.scheme
self.endpoint_hostname = endpoint_parts.hostname
self.endpoint_port = endpoint_parts.port
self.endpoint_path = endpoint_parts.path
self.connection_class = self.get_connection_class(self.endpoint_scheme)
self.connection_kwargs = self.get_connection_kwargs(
self.endpoint_scheme, **kwargs)
@staticmethod
def get_connection_class(scheme):
if scheme == 'https':
return VerifiedHTTPSConnection
else:
return httplib.HTTPConnection
@staticmethod
def get_connection_kwargs(scheme, **kwargs):
_kwargs = {'timeout': float(kwargs.get('timeout', 600))}
if scheme == 'https':
_kwargs['ca_certs'] = kwargs.get('ca_certs', None)
_kwargs['cert_file'] = kwargs.get('cert_file', None)
_kwargs['key_file'] = kwargs.get('key_file', None)
_kwargs['insecure'] = kwargs.get('insecure', False)
_kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)
return _kwargs
def get_connection(self):
_class = self.connection_class
try:
return _class(self.endpoint_hostname, self.endpoint_port,
**self.connection_kwargs)
except httplib.InvalidURL:
raise exc.EndpointNotFound
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
self._log_request(method, url, kwargs['headers'])
conn = self.get_connection()
try:
url_parts = urlparse.urlparse(url)
conn_url = posixpath.normpath(url_parts.path)
LOG.debug('Actual Path: {path}'.format(path=conn_url))
if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
conn.putrequest(method, conn_url)
for header, value in kwargs['headers'].items():
conn.putheader(header, value)
conn.endheaders()
chunk = kwargs['body'].read(CHUNKSIZE)
# Chunk it, baby...
while chunk:
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = kwargs['body'].read(CHUNKSIZE)
conn.send('0\r\n\r\n')
else:
conn.request(method, conn_url, **kwargs)
resp = conn.getresponse()
except socket.gaierror as e:
message = ("Error finding address for %(url)s: %(e)s" %
{'url': url, 'e': e})
raise exc.EndpointNotFound(message)
except (socket.error, socket.timeout) as e:
message = ("Error communicating with %(endpoint)s %(e)s" %
{'endpoint': self.endpoint, 'e': e})
raise exc.TimeoutException(message)
body_iter = ResponseBodyIterator(resp)
# Read body into string if it isn't obviously image data
if resp.getheader('content-type', None) != 'application/octet-stream':
body_str = ''.join([body_chunk for body_chunk in body_iter])
body_iter = six.StringIO(body_str)
self._log_response(resp, None)
else:
self._log_response(resp, body_iter)
return resp, body_iter
def _log_request(self, method, url, headers):
LOG.info('Request: ' + method + ' ' + url)
if headers:
headers_out = headers
if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
token = headers['X-Auth-Token']
if len(token) > 64 and TOKEN_CHARS_RE.match(token):
headers_out = headers.copy()
headers_out['X-Auth-Token'] = "<Token omitted>"
LOG.info('Request Headers: ' + str(headers_out))
def _log_response(self, resp, body):
status = str(resp.status)
LOG.info("Response Status: " + status)
if resp.getheaders():
LOG.info('Response Headers: ' + str(resp.getheaders()))
if body:
str_body = str(body)
length = len(body)
LOG.info('Response Body: ' + str_body[:2048])
if length >= 2048:
self.LOG.debug("Large body (%d) md5 summary: %s", length,
hashlib.md5(str_body).hexdigest())
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
if kwargs['headers']['Content-Type'] != 'application/json':
msg = "Only application/json content-type is supported."
raise lib_exc.InvalidContentType(msg)
if 'body' in kwargs:
kwargs['body'] = json.dumps(kwargs['body'])
resp, body_iter = self._http_request(url, method, **kwargs)
if 'application/json' in resp.getheader('content-type', ''):
body = ''.join([chunk for chunk in body_iter])
try:
body = json.loads(body)
except ValueError:
LOG.error('Could not decode response body as JSON')
else:
msg = "Only json/application content-type is supported."
raise lib_exc.InvalidContentType(msg)
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
if 'body' in kwargs:
if (hasattr(kwargs['body'], 'read')
and method.lower() in ('post', 'put')):
# We use 'Transfer-Encoding: chunked' because
# body size may not always be known in advance.
kwargs['headers']['Transfer-Encoding'] = 'chunked'
# Decorate the request with auth
req_url, kwargs['headers'], kwargs['body'] = \
self.auth_provider.auth_request(
method=method, url=url, headers=kwargs['headers'],
body=kwargs.get('body', None), filters=self.filters)
return self._http_request(req_url, method, **kwargs)
class OpenSSLConnectionDelegator(object):
"""
An OpenSSL.SSL.Connection delegator.
Supplies an additional 'makefile' method which httplib requires
and is not present in OpenSSL.SSL.Connection.
Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
a delegator must be used.
"""
def __init__(self, *args, **kwargs):
self.connection = OpenSSL.SSL.Connection(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.connection, name)
def makefile(self, *args, **kwargs):
# Ensure the socket is closed when this file is closed
kwargs['close'] = True
return socket._fileobject(self.connection, *args, **kwargs)
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
"""
Extended HTTPSConnection which uses the OpenSSL library
for enhanced SSL support.
Note: Much of this functionality can eventually be replaced
with native Python 3.3 code.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
ca_certs=None, timeout=None, insecure=False,
ssl_compression=True):
httplib.HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
self.timeout = timeout
self.insecure = insecure
self.ssl_compression = ssl_compression
self.ca_certs = ca_certs
self.setcontext()
@staticmethod
def host_matches_cert(host, x509):
"""
Verify that the x509 certificate we have received
from 'host' correctly identifies the server we are
connecting to, i.e. that the certificate's Common Name
or a Subject Alternative Name matches 'host'.
"""
# First see if we can match the CN
if x509.get_subject().commonName == host:
return True
# Also try Subject Alternative Names for a match
san_list = None
for i in moves.xrange(x509.get_extension_count()):
ext = x509.get_extension(i)
if ext.get_short_name() == 'subjectAltName':
san_list = str(ext)
for san in ''.join(san_list.split()).split(','):
if san == "DNS:%s" % host:
return True
# Server certificate does not match host
msg = ('Host "%s" does not match x509 certificate contents: '
'CommonName "%s"' % (host, x509.get_subject().commonName))
if san_list is not None:
msg = msg + ', subjectAltName "%s"' % san_list
raise exc.SSLCertificateError(msg)
def verify_callback(self, connection, x509, errnum,
depth, preverify_ok):
if x509.has_expired():
msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
raise exc.SSLCertificateError(msg)
if depth == 0 and preverify_ok is True:
# We verify that the host matches against the last
# certificate in the chain
return self.host_matches_cert(self.host, x509)
else:
# Pass through OpenSSL's default result
return preverify_ok
def setcontext(self):
"""
Set up the OpenSSL context.
"""
self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
if self.ssl_compression is False:
self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION
if self.insecure is not True:
self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
self.verify_callback)
else:
self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
self.verify_callback)
if self.cert_file:
try:
self.context.use_certificate_file(self.cert_file)
except Exception as e:
msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
raise exc.SSLConfigurationError(msg)
if self.key_file is None:
# We support having key and cert in same file
try:
self.context.use_privatekey_file(self.cert_file)
except Exception as e:
msg = ('No key file specified and unable to load key '
'from "%s" %s' % (self.cert_file, e))
raise exc.SSLConfigurationError(msg)
if self.key_file:
try:
self.context.use_privatekey_file(self.key_file)
except Exception as e:
msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
raise exc.SSLConfigurationError(msg)
if self.ca_certs:
try:
self.context.load_verify_locations(self.ca_certs)
except Exception as e:
msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e)
raise exc.SSLConfigurationError(msg)
else:
self.context.set_default_verify_paths()
def connect(self):
"""
Connect to an SSL port using the OpenSSL library and apply
per-connection parameters.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout is not None:
# '0' microseconds
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
struct.pack('LL', self.timeout, 0))
self.sock = OpenSSLConnectionDelegator(self.context, sock)
self.sock.connect((self.host, self.port))
def close(self):
if self.sock:
# Remove the reference to the socket but don't close it yet.
# Response close will close both socket and associated
# file. Closing socket too soon will cause response
# reads to fail with socket IO error 'Bad file descriptor'.
self.sock = None
httplib.HTTPSConnection.close(self)
class ResponseBodyIterator(object):
"""A class that acts as an iterator over an HTTP response."""
def __init__(self, resp):
self.resp = resp
def __iter__(self):
while True:
yield self.next()
def next(self):
chunk = self.resp.read(CHUNKSIZE)
if chunk:
return chunk
else:
raise StopIteration()
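
A standalone sketch of the chunked-transfer framing used in _http_request above, driven by an in-memory file instead of a live connection (CHUNKSIZE is repeated here only so the sketch runs on its own):

from io import BytesIO

CHUNKSIZE = 1024 * 64  # 64kB, same as the deleted module

def frame_chunked(body_file):
    # Emit each chunk as "<hex length>\r\n<data>\r\n", then the terminator.
    frames = []
    chunk = body_file.read(CHUNKSIZE)
    while chunk:
        frames.append(b'%x\r\n' % len(chunk) + chunk + b'\r\n')
        chunk = body_file.read(CHUNKSIZE)
    frames.append(b'0\r\n\r\n')
    return b''.join(frames)

assert frame_chunked(BytesIO(b'hello')) == b'5\r\nhello\r\n0\r\n\r\n'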

View File

@ -1,438 +0,0 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo_log import log as logging
import six
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest import clients
from tempest.common import cred_provider
from tempest import config
from tempest import exceptions
from tempest.services.identity.v2.json import identity_client as v2_identity
CONF = config.CONF
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class CredsClient(object):
"""This class is a wrapper around the identity clients, to provide a
single interface for managing credentials in both v2 and v3 cases.
It's not bound to created credentials, only to a specific set of admin
credentials used for generating credentials.
"""
def __init__(self, identity_client):
# The client implies version and credentials
self.identity_client = identity_client
self.credentials = self.identity_client.auth_provider.credentials
def create_user(self, username, password, project, email):
user = self.identity_client.create_user(
username, password, project['id'], email)
return user
@abc.abstractmethod
def create_project(self, name, description):
pass
def assign_user_role(self, user, project, role_name):
try:
roles = self._list_roles()
role = next(r for r in roles if r['name'] == role_name)
except StopIteration:
msg = 'No "%s" role found' % role_name
raise lib_exc.NotFound(msg)
try:
self.identity_client.assign_user_role(project['id'], user['id'],
role['id'])
except lib_exc.Conflict:
LOG.debug("Role %s already assigned on project %s for user %s" % (
role['id'], project['id'], user['id']))
@abc.abstractmethod
def get_credentials(self, user, project, password):
pass
def delete_user(self, user_id):
self.identity_client.delete_user(user_id)
def _list_roles(self):
roles = self.identity_client.list_roles()
return roles
class V2CredsClient(CredsClient):
def create_project(self, name, description):
tenant = self.identity_client.create_tenant(
name=name, description=description)
return tenant
def get_credentials(self, user, project, password):
return cred_provider.get_credentials(
identity_version='v2',
username=user['name'], user_id=user['id'],
tenant_name=project['name'], tenant_id=project['id'],
password=password)
def delete_project(self, project_id):
self.identity_client.delete_tenant(project_id)
class V3CredsClient(CredsClient):
def __init__(self, identity_client, domain_name):
super(V3CredsClient, self).__init__(identity_client)
try:
# Domain names must be unique, in any case a list is returned,
# selecting the first (and only) element
self.creds_domain = self.identity_client.list_domains(
params={'name': domain_name})[0]
except lib_exc.NotFound:
# TODO(andrea) we could probably create the domain on the fly
msg = "Configured domain %s could not be found" % domain_name
raise exceptions.InvalidConfiguration(msg)
def create_project(self, name, description):
project = self.identity_client.create_project(
name=name, description=description,
domain_id=self.creds_domain['id'])
return project
def get_credentials(self, user, project, password):
return cred_provider.get_credentials(
identity_version='v3',
username=user['name'], user_id=user['id'],
project_name=project['name'], project_id=project['id'],
password=password,
project_domain_name=self.creds_domain['name'])
def delete_project(self, project_id):
self.identity_client.delete_project(project_id)
def get_creds_client(identity_client, project_domain_name=None):
if isinstance(identity_client, v2_identity.IdentityClientJSON):
return V2CredsClient(identity_client)
else:
return V3CredsClient(identity_client, project_domain_name)
class IsolatedCreds(cred_provider.CredentialProvider):
def __init__(self, identity_version=None, name=None, password='pass',
network_resources=None):
super(IsolatedCreds, self).__init__(identity_version, name, password,
network_resources)
self.network_resources = network_resources
self.isolated_creds = {}
self.ports = []
self.password = password
self.default_admin_creds = cred_provider.get_configured_credentials(
'identity_admin', fill_in=True,
identity_version=self.identity_version)
self.identity_admin_client, self.network_admin_client = (
self._get_admin_clients())
# Domain where isolated credentials are provisioned (v3 only).
# Use that of the admin account if None is configured.
self.creds_domain_name = None
if self.identity_version == 'v3':
self.creds_domain_name = (
CONF.auth.tenant_isolation_domain_name or
self.default_admin_creds.project_domain_name)
self.creds_client = get_creds_client(
self.identity_admin_client, self.creds_domain_name)
def _get_admin_clients(self):
"""
Returns a tuple with instances of the following admin clients (in this
order):
identity
network
"""
os = clients.Manager(self.default_admin_creds)
if self.identity_version == 'v2':
return os.identity_client, os.network_client
else:
return os.identity_v3_client, os.network_client
def _create_creds(self, suffix="", admin=False, roles=None):
"""Create random credentials under the following schema.
If the name contains a '.' is the full class path of something, and
we don't really care. If it isn't, it's probably a meaningful name,
so use it.
For logging purposes, -user and -tenant are long and redundant,
don't use them. The user# will be sufficient to figure it out.
"""
if '.' in self.name:
root = ""
else:
root = self.name
project_name = data_utils.rand_name(root) + suffix
project_desc = project_name + "-desc"
project = self.creds_client.create_project(
name=project_name, description=project_desc)
username = data_utils.rand_name(root) + suffix
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self.creds_client.create_user(
username, self.password, project, email)
if admin:
self.creds_client.assign_user_role(user, project,
CONF.identity.admin_role)
# Add roles specified in config file
for conf_role in CONF.auth.tempest_roles:
self.creds_client.assign_user_role(user, project, conf_role)
# Add roles requested by caller
if roles:
for role in roles:
self.creds_client.assign_user_role(user, project, role)
creds = self.creds_client.get_credentials(user, project, self.password)
return cred_provider.TestResources(creds)
def _create_network_resources(self, tenant_id):
network = None
subnet = None
router = None
# Sanity-check the requested network resource settings
if self.network_resources:
if self.network_resources['router']:
if (not self.network_resources['subnet'] or
not self.network_resources['network']):
raise exceptions.InvalidConfiguration(
'A router requires a subnet and network')
elif self.network_resources['subnet']:
if not self.network_resources['network']:
raise exceptions.InvalidConfiguration(
'A subnet requires a network')
elif self.network_resources['dhcp']:
raise exceptions.InvalidConfiguration('DHCP requires a subnet')
data_utils.rand_name_root = data_utils.rand_name(self.name)
if not self.network_resources or self.network_resources['network']:
network_name = data_utils.rand_name_root + "-network"
network = self._create_network(network_name, tenant_id)
try:
if not self.network_resources or self.network_resources['subnet']:
subnet_name = data_utils.rand_name_root + "-subnet"
subnet = self._create_subnet(subnet_name, tenant_id,
network['id'])
if not self.network_resources or self.network_resources['router']:
router_name = data_utils.rand_name_root + "-router"
router = self._create_router(router_name, tenant_id)
self._add_router_interface(router['id'], subnet['id'])
except Exception:
if router:
self._clear_isolated_router(router['id'], router['name'])
if subnet:
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if network:
self._clear_isolated_network(network['id'], network['name'])
raise
return network, subnet, router
def _create_network(self, name, tenant_id):
resp_body = self.network_admin_client.create_network(
name=name, tenant_id=tenant_id)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.network_resources:
resp_body = self.network_admin_client.\
create_subnet(
network_id=network_id, cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
ip_version=4)
else:
resp_body = self.network_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
ip_version=4)
break
except lib_exc.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise Exception(message)
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
resp_body = self.network_admin_client.create_router(
router_name,
external_gateway_info=external_net_id,
tenant_id=tenant_id)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
self.network_admin_client.add_router_interface_with_subnet_id(
router_id, subnet_id)
def get_credentials(self, credential_type):
if self.isolated_creds.get(str(credential_type)):
credentials = self.isolated_creds[str(credential_type)]
else:
if credential_type in ['primary', 'alt', 'admin']:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
else:
credentials = self._create_creds(roles=credential_type)
self.isolated_creds[str(credential_type)] = credentials
# Maintained until tests are ported
LOG.info("Acquired isolated creds:\n credentials: %s"
% credentials)
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
credentials.set_resources(network=network, subnet=subnet,
router=router)
LOG.info("Created isolated network resources for : \n"
+ " credentials: %s" % credentials)
return credentials
def get_primary_creds(self):
return self.get_credentials('primary')
def get_admin_creds(self):
return self.get_credentials('admin')
def get_alt_creds(self):
return self.get_credentials('alt')
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
# The roles list as a str will become the index as the dict key for
# the created credentials set in the isolated_creds dict.
exist_creds = self.isolated_creds.get(str(roles))
# If force_new flag is True 2 cred sets with the same roles are needed
# handle this by creating a separate index for old one to store it
# separately for cleanup
if exist_creds and force_new:
new_index = str(roles) + '-' + str(len(self.isolated_creds))
self.isolated_creds[new_index] = exist_creds
del self.isolated_creds[str(roles)]
return self.get_credentials(roles)
def _clear_isolated_router(self, router_id, router_name):
net_client = self.network_admin_client
try:
net_client.delete_router(router_id)
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete' %
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
net_client = self.network_admin_client
try:
net_client.delete_subnet(subnet_id)
except lib_exc.NotFound:
LOG.warning('subnet with name: %s not found for delete' %
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.network_admin_client
try:
net_client.delete_network(network_id)
except lib_exc.NotFound:
LOG.warning('network with name: %s not found for delete' %
network_name)
def _cleanup_default_secgroup(self, tenant):
net_client = self.network_admin_client
resp_body = net_client.list_security_groups(tenant_id=tenant,
name="default")
secgroups_to_delete = resp_body['security_groups']
for secgroup in secgroups_to_delete:
try:
net_client.delete_security_group(secgroup['id'])
except lib_exc.NotFound:
LOG.warning('Security group %s, id %s not found for clean-up' %
(secgroup['name'], secgroup['id']))
def _clear_isolated_net_resources(self):
net_client = self.network_admin_client
for cred in self.isolated_creds:
creds = self.isolated_creds.get(cred)
if (not creds or not any([creds.router, creds.network,
creds.subnet])):
continue
LOG.debug("Clearing network: %(network)s, "
"subnet: %(subnet)s, router: %(router)s",
{'network': creds.network, 'subnet': creds.subnet,
'router': creds.router})
if (not self.network_resources or
(self.network_resources.get('router') and creds.subnet)):
try:
net_client.remove_router_interface_with_subnet_id(
creds.router['id'], creds.subnet['id'])
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete' %
creds.router['name'])
self._clear_isolated_router(creds.router['id'],
creds.router['name'])
if (not self.network_resources or
self.network_resources.get('subnet')):
self._clear_isolated_subnet(creds.subnet['id'],
creds.subnet['name'])
if (not self.network_resources or
self.network_resources.get('network')):
self._clear_isolated_network(creds.network['id'],
creds.network['name'])
def clear_isolated_creds(self):
if not self.isolated_creds:
return
self._clear_isolated_net_resources()
for creds in self.isolated_creds.itervalues():
try:
self.creds_client.delete_user(creds.user_id)
except lib_exc.NotFound:
LOG.warning("user with name: %s not found for delete" %
creds.username)
try:
if CONF.service_available.neutron:
self._cleanup_default_secgroup(creds.tenant_id)
self.creds_client.delete_project(creds.tenant_id)
except lib_exc.NotFound:
LOG.warning("tenant with name: %s not found for delete" %
creds.tenant_name)
self.isolated_creds = {}
def is_multi_user(self):
return True
def is_multi_tenant(self):
return True
def is_role_available(self, role):
return True
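
A standalone sketch of the dependency checks _create_network_resources enforces on the network_resources dict (same elif structure as above, runnable without a cloud):

def check_network_resources(resources):
    if resources['router']:
        if not resources['subnet'] or not resources['network']:
            raise ValueError('A router requires a subnet and network')
    elif resources['subnet']:
        if not resources['network']:
            raise ValueError('A subnet requires a network')
    elif resources['dhcp']:
        raise ValueError('DHCP requires a subnet')

check_network_resources({'network': True, 'subnet': True,
                         'router': True, 'dhcp': True})      # accepted
try:
    check_network_resources({'network': False, 'subnet': False,
                             'router': False, 'dhcp': True})
except ValueError as e:
    assert 'DHCP requires a subnet' in str(e)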

View File

@ -1,85 +0,0 @@
# (c) 2014 Deutsche Telekom AG
# Copyright 2014 Red Hat, Inc.
# Copyright 2014 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import service_client
from tempest import config
CONF = config.CONF
class NegativeRestClient(service_client.ServiceClient):
"""
Version of RestClient that does not raise exceptions.
"""
def __init__(self, auth_provider, service,
build_interval=None, build_timeout=None,
disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
region, endpoint_type = self._get_region_and_endpoint_type(service)
super(NegativeRestClient, self).__init__(
auth_provider,
service,
region,
endpoint_type=endpoint_type,
build_interval=build_interval,
build_timeout=build_timeout,
disable_ssl_certificate_validation=(
disable_ssl_certificate_validation),
ca_certs=ca_certs,
trace_requests=trace_requests)
def _get_region_and_endpoint_type(self, service):
"""
Returns the region for a specific service
"""
service_region = None
service_endpoint_type = None
for cfgname in dir(CONF._config):
# Find all config.FOO.catalog_type and assume FOO is a service.
cfg = getattr(CONF, cfgname)
catalog_type = getattr(cfg, 'catalog_type', None)
if catalog_type == service:
service_region = getattr(cfg, 'region', None)
service_endpoint_type = getattr(cfg, 'endpoint_type', None)
if not service_region:
service_region = CONF.identity.region
return service_region, service_endpoint_type
def _error_checker(self, method, url,
headers, body, resp, resp_body):
pass
def send_request(self, method, url_template, resources, body=None):
url = url_template % tuple(resources)
if method == "GET":
resp, body = self.get(url)
elif method == "POST":
resp, body = self.post(url, body)
elif method == "PUT":
resp, body = self.put(url, body)
elif method == "PATCH":
resp, body = self.patch(url, body)
elif method == "HEAD":
resp, body = self.head(url)
elif method == "DELETE":
resp, body = self.delete(url)
elif method == "COPY":
resp, body = self.copy(url)
else:
assert False
return resp, body
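
An illustrative sketch of how send_request above composes its URL from a template and resource identifiers; the template and IDs are hypothetical and no request is actually made:

url_template = '/servers/%s/metadata/%s'
resources = ('SERVER_ID', 'KEY')
url = url_template % tuple(resources)
assert url == '/servers/SERVER_ID/metadata/KEY'
# NegativeRestClient.send_request('GET', url_template, resources) would then
# call self.get(url) and return (resp, body) without raising on error codes,
# because _error_checker above is a no-op.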

View File

@ -1,85 +0,0 @@
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common import rest_client
class ServiceClient(rest_client.RestClient):
def __init__(self, auth_provider, service, region,
endpoint_type=None, build_interval=None, build_timeout=None,
disable_ssl_certificate_validation=None, ca_certs=None,
trace_requests=None):
dscv = disable_ssl_certificate_validation
params = {
'disable_ssl_certificate_validation': dscv,
'ca_certs': ca_certs,
'trace_requests': trace_requests
}
if endpoint_type is not None:
params.update({'endpoint_type': endpoint_type})
if build_interval is not None:
params.update({'build_interval': build_interval})
if build_timeout is not None:
params.update({'build_timeout': build_timeout})
super(ServiceClient, self).__init__(auth_provider, service, region,
**params)
class ResponseBody(dict):
"""Class that wraps an http response and dict body into a single value.
Callers that receive this object will normally use it as a dict but
can extract the response if needed.
"""
def __init__(self, response, body=None):
body_data = body or {}
self.update(body_data)
self.response = response
def __str__(self):
body = super(ResponseBody, self).__str__()
return "response: %s\nBody: %s" % (self.response, body)
class ResponseBodyData(object):
"""Class that wraps an http response and string data into a single value.
"""
def __init__(self, response, data):
self.response = response
self.data = data
def __str__(self):
return "response: %s\nBody: %s" % (self.response, self.data)
class ResponseBodyList(list):
"""Class that wraps an http response and list body into a single value.
Callers that receive this object will normally use it as a list but
can extract the response if needed.
"""
def __init__(self, response, body=None):
body_data = body or []
self.extend(body_data)
self.response = response
def __str__(self):
body = super(ResponseBodyList, self).__str__()
return "response: %s\nBody: %s" % (self.response, body)

View File

@ -1,151 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import select
import socket
import time
import warnings
from oslo_log import log as logging
import six
from six import moves
from tempest import exceptions
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
LOG = logging.getLogger(__name__)
class Client(object):
def __init__(self, host, username, password=None, timeout=300, pkey=None,
channel_timeout=10, look_for_keys=False, key_filename=None):
self.host = host
self.username = username
self.password = password
if isinstance(pkey, six.string_types):
pkey = paramiko.RSAKey.from_private_key(
moves.cStringIO(str(pkey)))
self.pkey = pkey
self.look_for_keys = look_for_keys
self.key_filename = key_filename
self.timeout = int(timeout)
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
def _get_ssh_connection(self, sleep=1.5, backoff=1):
"""Returns an ssh connection to the specified host."""
bsleep = sleep
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
_start_time = time.time()
if self.pkey is not None:
LOG.info("Creating ssh connection to '%s' as '%s'"
" with public key authentication",
self.host, self.username)
else:
LOG.info("Creating ssh connection to '%s' as '%s'"
" with password %s",
self.host, self.username, str(self.password))
attempts = 0
while True:
try:
ssh.connect(self.host, username=self.username,
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename,
timeout=self.channel_timeout, pkey=self.pkey)
LOG.info("ssh connection to %s@%s successfuly created",
self.username, self.host)
return ssh
except (socket.error,
paramiko.SSHException) as e:
if self._is_timed_out(_start_time):
LOG.exception("Failed to establish authenticated ssh"
" connection to %s@%s after %d attempts",
self.username, self.host, attempts)
raise exceptions.SSHTimeout(host=self.host,
user=self.username,
password=self.password)
bsleep += backoff
attempts += 1
LOG.warning("Failed to establish authenticated ssh"
" connection to %s@%s (%s). Number attempts: %s."
" Retry after %d seconds.",
self.username, self.host, e, attempts, bsleep)
time.sleep(bsleep)
def _is_timed_out(self, start_time):
return (time.time() - self.timeout) > start_time
def exec_command(self, cmd):
"""
Execute the specified command on the server.
Note that this method is reading whole command outputs to memory, thus
shouldn't be used for large outputs.
:returns: data read from standard output of the command.
:raises: SSHExecCommandFailed if command returns nonzero
status. The exception contains command status stderr content.
"""
ssh = self._get_ssh_connection()
transport = ssh.get_transport()
channel = transport.open_session()
channel.fileno() # Register event pipe
channel.exec_command(cmd)
channel.shutdown_write()
out_data = []
err_data = []
poll = select.poll()
poll.register(channel, select.POLLIN)
start_time = time.time()
while True:
ready = poll.poll(self.channel_timeout)
if not any(ready):
if not self._is_timed_out(start_time):
continue
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
cmd, self.host))
if not ready[0]: # If there is nothing to read.
continue
out_chunk = err_chunk = None
if channel.recv_ready():
out_chunk = channel.recv(self.buf_size)
out_data += out_chunk,
if channel.recv_stderr_ready():
err_chunk = channel.recv_stderr(self.buf_size)
err_data += err_chunk,
if channel.closed and not err_chunk and not out_chunk:
break
exit_status = channel.recv_exit_status()
if 0 != exit_status:
raise exceptions.SSHExecCommandFailed(
command=cmd, exit_status=exit_status,
strerror=''.join(err_data))
return ''.join(out_data)
def test_connection_auth(self):
"""Raises an exception when we can not connect to server via ssh."""
connection = self._get_ssh_connection()
connection.close()
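
Illustrative usage of the Client class above; the address and credentials are hypothetical placeholders, and a reachable SSH server is required for this to actually run:

client = Client('192.0.2.10', 'cirros', password='secret',
                timeout=60, channel_timeout=20)
client.test_connection_auth()                 # raises SSHTimeout on failure
uptime = client.exec_command('cat /proc/uptime')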

View File

@ -1,21 +0,0 @@
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency.fixture import lockutils
class LockFixture(lockutils.LockFixture):
def __init__(self, name):
super(LockFixture, self).__init__(name, 'tempest-')

View File

@ -1,3 +0,0 @@
PING_IPV4_COMMAND = 'ping -c 3 '
PING_IPV6_COMMAND = 'ping6 -c 3 '
PING_PACKET_LOSS_REGEX = '(\d{1,3})\.?\d*\% packet loss'
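
An illustrative sketch of how PING_PACKET_LOSS_REGEX above extracts the loss percentage from a typical ping summary line (the sample output is hypothetical):

import re

PING_PACKET_LOSS_REGEX = r'(\d{1,3})\.?\d*\% packet loss'
summary = '3 packets transmitted, 3 received, 0% packet loss, time 2003ms'
loss = re.search(PING_PACKET_LOSS_REGEX, summary).group(1)
assert loss == '0'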

View File

@ -1,23 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def have_effective_read_access(path):
try:
fh = open(path, "rb")
except IOError:
return False
fh.close()
return True

View File

@ -1,179 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
import time
import six
from tempest.common import ssh
from tempest import config
from tempest import exceptions
CONF = config.CONF
class RemoteClient(object):
# NOTE(afazekas): It should always get an address instead of server
def __init__(self, server, username, password=None, pkey=None):
# zfl: ssh with the server's generated adminpass does not work, so
# if the password is a generated one,
# use CONF.compute.image_ssh_password instead;
# if the password was specified by the user, just use it
if password is not None and password.startswith('opencos'):
self.password = password[len('opencos'):]
else:
self.password = CONF.compute.image_ssh_password
self.ssh_timeout = CONF.compute.ssh_timeout
# network = CONF.compute.network_for_ssh
network = CONF.compute.fixed_network_name
self.ip_version = CONF.compute.ip_version_for_ssh
self.ssh_channel_timeout = CONF.compute.ssh_channel_timeout
if isinstance(server, six.string_types):
self.ip_address = server
else:
self.addresses = server['addresses'][network]
# zfl: look for the address backwards (first floating ip, then internal
# ip)
for address in self.addresses[::-1]:
if address['version'] == self.ip_version:
self.ip_address = address['addr']
break
else:
raise exceptions.ServerUnreachable()
self.ssh_client = ssh.Client(self.ip_address, username, self.password,
self.ssh_timeout, pkey=pkey,
channel_timeout=self.ssh_channel_timeout)
def exec_command(self, cmd):
return self.ssh_client.exec_command(cmd)
def validate_authentication(self):
"""Validate ssh connection and authentication
This method raises an Exception when the validation fails.
"""
self.ssh_client.test_connection_auth()
def hostname_equals_servername(self, expected_hostname):
# Get host name using command "hostname"
actual_hostname = self.exec_command("hostname").rstrip()
return expected_hostname == actual_hostname
def get_ram_size_in_mb(self):
output = self.exec_command('free -m | grep Mem')
if output:
return output.split()[1]
def get_number_of_vcpus(self):
command = 'cat /proc/cpuinfo | grep processor | wc -l'
output = self.exec_command(command)
return int(output)
def get_partitions(self):
# Return the contents of /proc/partitions
command = 'cat /proc/partitions'
output = self.exec_command(command)
return output
def get_boot_time(self):
cmd = 'cut -f1 -d. /proc/uptime'
boot_secs = self.exec_command(cmd)
boot_time = time.time() - int(boot_secs)
return time.localtime(boot_time)
def write_to_console(self, message):
message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
# usually to /dev/ttyS0
cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
return self.exec_command(cmd)
def ping_host(self, host, count=CONF.compute.ping_count,
size=CONF.compute.ping_size):
addr = netaddr.IPAddress(host)
cmd = 'ping6' if addr.version == 6 else 'ping'
cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
return self.exec_command(cmd)
def get_mac_address(self):
cmd = "/bin/ip addr | awk '/ether/ {print $2}'"
return self.exec_command(cmd)
def get_nic_name(self, address):
cmd = "/bin/ip -o addr | awk '/%s/ {print $2}'" % address
return self.exec_command(cmd)
def get_ip_list(self):
cmd = "/bin/ip address"
return self.exec_command(cmd)
def assign_static_ip(self, nic, addr):
cmd = "sudo /bin/ip addr add {ip}/{mask} dev {nic}".format(
ip=addr, mask=CONF.network.tenant_network_mask_bits,
nic=nic
)
return self.exec_command(cmd)
def turn_nic_on(self, nic):
cmd = "sudo /bin/ip link set {nic} up".format(nic=nic)
return self.exec_command(cmd)
def get_pids(self, pr_name):
# Get pid(s) of a process/program
cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
return self.exec_command(cmd).split('\n')
def get_dns_servers(self):
cmd = 'cat /etc/resolv.conf'
resolve_file = self.exec_command(cmd).strip().split('\n')
entries = (l.split() for l in resolve_file)
dns_servers = [l[1] for l in entries
if len(l) and l[0] == 'nameserver']
return dns_servers
def send_signal(self, pid, signum):
cmd = 'sudo /bin/kill -{sig} {pid}'.format(pid=pid, sig=signum)
return self.exec_command(cmd)
def _renew_lease_udhcpc(self, fixed_ip=None):
"""Renews DHCP lease via udhcpc client. """
file_path = '/var/run/udhcpc.'
nic_name = self.get_nic_name(fixed_ip)
nic_name = nic_name.strip().lower()
pid = self.exec_command('cat {path}{nic}.pid'.
format(path=file_path, nic=nic_name))
pid = pid.strip()
self.send_signal(pid, 'USR1')
def _renew_lease_dhclient(self, fixed_ip=None):
"""Renews DHCP lease via dhclient client. """
cmd = "sudo /sbin/dhclient -r && sudo /sbin/dhclient"
self.exec_command(cmd)
def renew_lease(self, fixed_ip=None):
"""Wrapper method for renewing DHCP lease via given client
Supporting:
* udhcpc
* dhclient
"""
# TODO(yfried): add support for dhcpcd
supported_clients = ['udhcpc', 'dhclient']
dhcp_client = CONF.scenario.dhcp_client
if dhcp_client not in supported_clients:
raise exceptions.InvalidConfiguration('%s DHCP client unsupported'
% dhcp_client)
if dhcp_client == 'udhcpc' and not fixed_ip:
raise ValueError("need to set 'fixed_ip' for udhcpc client")
return getattr(self, '_renew_lease_' + dhcp_client)(fixed_ip=fixed_ip)
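
A standalone sketch of the getattr dispatch pattern renew_lease uses above, with a toy class so it runs without a guest VM:

class LeaseRenewer(object):
    def _renew_lease_udhcpc(self, fixed_ip=None):
        return 'udhcpc renewed %s' % fixed_ip

    def _renew_lease_dhclient(self, fixed_ip=None):
        return 'dhclient renewed'

    def renew_lease(self, dhcp_client, fixed_ip=None):
        if dhcp_client not in ('udhcpc', 'dhclient'):
            raise ValueError('%s DHCP client unsupported' % dhcp_client)
        return getattr(self, '_renew_lease_' + dhcp_client)(fixed_ip=fixed_ip)

assert LeaseRenewer().renew_lease('dhclient') == 'dhclient renewed'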

View File

@ -1,161 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest_lib.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
"""Waits for a server to reach a given status."""
def _get_task_state(body):
return body.get('OS-EXT-STS:task_state', None)
# NOTE(afazekas): UNKNOWN status possible on ERROR
# or in a very early stage.
body = client.get_server(server_id)
old_status = server_status = body['status']
old_task_state = task_state = _get_task_state(body)
start_time = int(time.time())
timeout = client.build_timeout + extra_timeout
while True:
# NOTE(afazekas): Now the BUILD status only reached
# between the UNKNOWN->ACTIVE transition.
# TODO(afazekas): enumerate and validate the stable status set
if status == 'BUILD' and server_status != 'UNKNOWN':
return
if server_status == status:
if ready_wait:
if status == 'BUILD':
return
# NOTE(afazekas): The instance is in "ready for action state"
# when no task in progress
# NOTE(afazekas): Converted to string because of the XML
# responses
if str(task_state) == "None":
# without state api extension 3 sec usually enough
time.sleep(CONF.compute.ready_wait)
return
else:
return
time.sleep(client.build_interval)
body = client.get_server(server_id)
server_status = body['status']
task_state = _get_task_state(body)
if (server_status != old_status) or (task_state != old_task_state):
LOG.info('State transition "%s" ==> "%s" after %d second wait',
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
if (server_status == 'ERROR') and raise_on_error:
if 'fault' in body:
raise exceptions.BuildErrorException(body['fault'],
server_id=server_id)
else:
raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
if timed_out:
expected_task_state = 'None' if ready_wait else 'n/a'
message = ('Server %(server_id)s failed to reach %(status)s '
'status and task state "%(expected_task_state)s" '
'within the required time (%(timeout)s s).' %
{'server_id': server_id,
'status': status,
'expected_task_state': expected_task_state,
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
def wait_for_image_status(client, image_id, status):
"""Waits for an image to reach a given status.
The client should have a get_image(image_id) method to get the image.
The client should also have build_interval and build_timeout attributes.
"""
image = client.get_image(image_id)
start = int(time.time())
while image['status'] != status:
time.sleep(client.build_interval)
image = client.get_image(image_id)
status_curr = image['status']
if status_curr == 'ERROR':
raise exceptions.AddImageException(image_id=image_id)
# check the status again to avoid a false negative where we hit
# the timeout at the same time that the image reached the expected
# status
if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
message = ('Image %(image_id)s failed to reach %(status)s state '
'(current state %(status_curr)s) '
'within the required time (%(timeout)s s).' %
{'image_id': image_id,
'status': status,
'status_curr': status_curr,
'timeout': client.build_timeout})
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
def wait_for_bm_node_status(client, node_id, attr, status):
"""Waits for a baremetal node attribute to reach given status.
The client should have a show_node(node_uuid) method to get the node.
"""
_, node = client.show_node(node_id)
start = int(time.time())
while node[attr] != status:
time.sleep(client.build_interval)
_, node = client.show_node(node_id)
status_curr = node[attr]
if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
'within the required time (%(timeout)s s).' %
{'node_id': node_id,
'attr': attr,
'status': status,
'timeout': client.build_timeout})
message += ' Current state of %s: %s.' % (attr, status_curr)
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
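
A standalone sketch of the poll/timeout pattern that all three waiters above share: re-read the resource, return when it reaches the target state, and raise once build_timeout is exceeded (the example client in the trailing comment is hypothetical):

import time

def wait_for(fetch_status, target, build_interval=1, build_timeout=10):
    start = int(time.time())
    while True:
        status = fetch_status()
        if status == target:
            return
        if int(time.time()) - start >= build_timeout:
            raise RuntimeError('timed out waiting for %s (last status: %s)'
                               % (target, status))
        time.sleep(build_interval)

# e.g. wait_for(lambda: client.get_image(image_id)['status'], 'active'),
# where client is a hypothetical image client with a get_image() method.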

File diff suppressed because it is too large

View File

@ -1,246 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
# zfl add : get related opencos log
import re
import traceback
import time
from tempest_lib.common.utils import misc
# zfl add a decorator to show func's exec time
def exectime(func):
def newfunc(*args, **args2):
t0 = time.time()
f = func(*args, **args2)
print "\n\n@====exectime====%.3fs taken for {%s}\n\n" \
% (time.time() - t0, func.__name__)
return f
return newfunc
# zfl add end
class TempestException(Exception):
"""
Base Tempest Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, *args, **kwargs):
super(TempestException, self).__init__()
try:
self._error_string = self.message % kwargs
except Exception:
# at least get the core message out if something happened
self._error_string = self.message
if len(args) > 0:
# If there is a non-kwarg parameter, assume it's the error
# message or reason description and tack it on to the end
# of the exception message
# Convert all arguments into their string representations...
args = ["%s" % arg for arg in args]
self._error_string = (self._error_string +
"\nDetails: %s" % '\n'.join(args))
def __str__(self):
# zfl add : get related opencos log
try:
# zfl for debug only: stop and reserve
# if issubclass(type(self),SSHTimeout):
# import time
# time.sleep(999999999)
# zfl for debug only: stop and reserve end
self.get_log = True
log_obj = misc.GetOpenCosLog()
pattern1 = re.compile(r'\w+-\w+-\w+-\w+-\w+')
sresult = pattern1.findall(self._error_string)
obj_type = ""
if 'server' in self._error_string.lower():
obj_type = "server"
if sresult:
log_result = log_obj.get_opencos_log(sresult,
obj_type=obj_type)
else:
log_result = log_obj.get_opencos_log([],
obj_type=obj_type)
log_result = "\n--------- <logs>\n" + log_result
self._error_string = self._error_string + \
"\n\n\n===possible log ===" + \
log_result + \
"\n\n===possiblelog end===\n\n\n"
except Exception as e:
print "ZTE ===zfl : error ==", e
traceback.print_exc()
# else:
# print "\n========zfl,exception call __str__ again"
# zfl add end: get related opencos log
return self._error_string
class RestClientException(TempestException,
testtools.TestCase.failureException):
pass
class InvalidConfiguration(TempestException):
message = "Invalid Configuration"
class InvalidCredentials(TempestException):
message = "Invalid Credentials"
class InvalidServiceTag(TempestException):
message = "Invalid service tag"
class InvalidIdentityVersion(TempestException):
message = "Invalid version %(identity_version) of the identity service"
class TimeoutException(TempestException):
message = "Request timed out"
class BuildErrorException(TempestException):
message = "Server %(server_id)s failed to build and is in ERROR status"
class ImageKilledException(TempestException):
message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
class AddImageException(TempestException):
message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
class EC2RegisterImageException(TempestException):
message = ("Image %(image_id)s failed to become 'available' "
"in the allotted time")
class VolumeBuildErrorException(TempestException):
message = "Volume %(volume_id)s failed to build and is in ERROR status"
class SnapshotBuildErrorException(TempestException):
message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
class VolumeBackupException(TempestException):
message = "Volume backup %(backup_id)s failed and is in ERROR status"
class StackBuildErrorException(TempestException):
message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
"due to '%(stack_status_reason)s'")
class StackResourceBuildErrorException(TempestException):
message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
"in %(resource_status)s status due to "
"'%(resource_status_reason)s'")
class AuthenticationFailure(TempestException):
message = ("Authentication with user %(user)s and password "
"%(password)s failed auth using tenant %(tenant)s.")
class EndpointNotFound(TempestException):
message = "Endpoint not found"
class ImageFault(TempestException):
message = "Got image fault"
class IdentityError(TempestException):
message = "Got identity error"
class SSHTimeout(TempestException):
message = ("Connection to the %(host)s via SSH timed out.\n"
"User: %(user)s, Password: %(password)s")
class SSHExecCommandFailed(TempestException):
"""Raised when remotely executed command returns nonzero status."""
message = ("Command '%(command)s', exit status: %(exit_status)d, "
"Error:\n%(strerror)s")
class ServerUnreachable(TempestException):
message = "The server is not reachable via the configured network"
class TearDownException(TempestException):
message = "%(num)d cleanUp operation failed"
class RFCViolation(RestClientException):
message = "RFC Violation"
class InvalidHttpSuccessCode(RestClientException):
message = "The success code is different than the expected one"
class BadRequest(RestClientException):
message = "Bad request"
class ResponseWithNonEmptyBody(RFCViolation):
message = ("RFC Violation! Response with %(status)d HTTP Status Code "
"MUST NOT have a body")
class ResponseWithEntity(RFCViolation):
message = ("RFC Violation! Response with 205 HTTP Status Code "
"MUST NOT have an entity")
class InvalidHTTPResponseHeader(RestClientException):
message = "HTTP response header is invalid"
class InvalidStructure(TempestException):
message = "Invalid structure of table with details"
class CommandFailed(Exception):
def __init__(self, returncode, cmd, output, stderr):
super(CommandFailed, self).__init__()
self.returncode = returncode
self.cmd = cmd
self.stdout = output
self.stderr = stderr
def __str__(self):
return ("Command '%s' returned non-zero exit status %d.\n"
"stdout:\n%s\n"
"stderr:\n%s" % (self.cmd,
self.returncode,
self.stdout,
self.stderr))
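
An illustrative sketch of the usage pattern the TempestException docstring above describes: subclass it, define a printf-style message, and pass the keyword arguments to the constructor. VolumeQuotaExceeded is a hypothetical example class and the sketch assumes the TempestException base above is importable:

class VolumeQuotaExceeded(TempestException):
    message = "Volume quota exceeded for tenant %(tenant_id)s"

try:
    raise VolumeQuotaExceeded(tenant_id='demo')
except VolumeQuotaExceeded as e:
    assert 'demo' in str(e)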

View File

@ -1,80 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import auth
from tempest.common import cred_provider
from tempest import config
from tempest import exceptions
CONF = config.CONF
class Manager(object):
"""
Base manager class
Manager objects are responsible for providing a configuration object
and a client object for a test case to use in performing actions.
"""
def __init__(self, credentials=None):
"""
We allow overriding of the credentials used within the various
client classes managed by the Manager object. Left as None, the
standard username/password/tenant_name[/domain_name] is used.
:param credentials: Override of the credentials
"""
self.auth_version = CONF.identity.auth_version
if credentials is None:
self.credentials = cred_provider.get_configured_credentials('user')
else:
self.credentials = credentials
# Check if passed or default credentials are valid
if not self.credentials.is_valid():
raise exceptions.InvalidCredentials()
# Tenant isolation creates TestResources, but Accounts and some tests
# creates Credentials
if isinstance(credentials, cred_provider.TestResources):
creds = self.credentials.credentials
else:
creds = self.credentials
# Creates an auth provider for the credentials
self.auth_provider = get_auth_provider(creds)
# FIXME(andreaf) unused
self.client_attr_names = []
def get_auth_provider_class(credentials):
if isinstance(credentials, auth.KeystoneV3Credentials):
return auth.KeystoneV3AuthProvider, CONF.identity.uri_v3
else:
return auth.KeystoneV2AuthProvider, CONF.identity.uri
def get_auth_provider(credentials):
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
if credentials is None:
raise exceptions.InvalidCredentials(
'Credentials must be specified')
auth_provider_class, auth_url = get_auth_provider_class(
credentials)
return auth_provider_class(credentials, auth_url, **default_params)

View File

@ -1,219 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
import urllib
import six
from tempest.common import service_client
def handle_errors(f):
"""A decorator that allows to ignore certain types of errors."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
param_name = 'ignore_errors'
ignored_errors = kwargs.get(param_name, tuple())
if param_name in kwargs:
del kwargs[param_name]
try:
return f(*args, **kwargs)
except ignored_errors:
# Silently ignore errors
pass
return wrapper
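# --- Illustrative sketch, not part of the deleted module above ---
# How a caller uses the decorator: every method wrapped with @handle_errors
# accepts an extra 'ignore_errors' keyword listing exception types to swallow.
# The delete_node() call and the NotFound exception below are assumptions used
# only to show the calling convention (delete_node is decorated in the v1
# client further down in this change).
from tempest_lib import exceptions as lib_exc

def cleanup_node(client, node_uuid):
    # If the node is already gone, NotFound is swallowed by handle_errors and
    # the call simply returns None instead of raising.
    client.delete_node(node_uuid, ignore_errors=(lib_exc.NotFound,))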
class BaremetalClient(service_client.ServiceClient):
"""
Base Tempest REST client for Ironic API.
"""
uri_prefix = ''
def serialize(self, object_dict):
"""Serialize an Ironic object."""
return json.dumps(object_dict)
def deserialize(self, object_str):
"""Deserialize an Ironic object."""
return json.loads(object_str)
def _get_uri(self, resource_name, uuid=None, permanent=False):
"""
Get URI for a specific resource or object.
:param resource_name: The name of the REST resource, e.g., 'nodes'.
:param uuid: The unique identifier of an object in UUID format.
:return: Relative URI for the resource or object.
"""
prefix = self.uri_prefix if not permanent else ''
return '{pref}/{res}{uuid}'.format(pref=prefix,
res=resource_name,
uuid='/%s' % uuid if uuid else '')
def _make_patch(self, allowed_attributes, **kw):
"""
Create a JSON patch according to RFC 6902.
:param allowed_attributes: An iterable object that contains a set of
allowed attributes for an object.
:param **kw: Attributes and new values for them.
:return: A JSON patch that sets values of the specified attributes to
the new ones.
"""
def get_change(kw, path='/'):
for name, value in six.iteritems(kw):
if isinstance(value, dict):
for ch in get_change(value, path + '%s/' % name):
yield ch
else:
if value is None:
yield {'path': path + name,
'op': 'remove'}
else:
yield {'path': path + name,
'value': value,
'op': 'replace'}
patch = [ch for ch in get_change(kw)
if ch['path'].lstrip('/') in allowed_attributes]
return patch
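# --- Illustrative worked example, not part of the deleted module above ---
# What _make_patch() produces for a nested kwargs dict and the whitelist used
# by update_node() in the v1 client:
#
#     _make_patch(('driver', 'properties/cpus'),
#                 driver='fake', properties={'cpus': 4, 'local_gb': None})
#
# yields (ordering aside):
#
#     [{'path': '/driver', 'value': 'fake', 'op': 'replace'},
#      {'path': '/properties/cpus', 'value': 4, 'op': 'replace'}]
#
# 'properties/local_gb' would map to a 'remove' op because its value is None,
# but it is filtered out here since it is not in the allowed_attributes tuple.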
def _list_request(self, resource, permanent=False, **kwargs):
"""
Get the list of objects of the specified type.
:param resource: The name of the REST resource, e.g., 'nodes'.
"param **kw: Parameters for the request.
:return: A tuple with the server response and deserialized JSON list
of objects
"""
uri = self._get_uri(resource, permanent=permanent)
if kwargs:
uri += "?%s" % urllib.urlencode(kwargs)
resp, body = self.get(uri)
self.expected_success(200, resp['status'])
return resp, self.deserialize(body)
def _show_request(self, resource, uuid, permanent=False, **kwargs):
"""
Gets a specific object of the specified type.
:param uuid: Unique identifier of the object in UUID format.
:return: Serialized object as a dictionary.
"""
if 'uri' in kwargs:
uri = kwargs['uri']
else:
uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
resp, body = self.get(uri)
self.expected_success(200, resp['status'])
return resp, self.deserialize(body)
def _create_request(self, resource, object_dict):
"""
Create an object of the specified type.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param object_dict: A Python dict that represents an object of the
specified type.
:return: A tuple with the server response and the deserialized created
object.
"""
body = self.serialize(object_dict)
uri = self._get_uri(resource)
resp, body = self.post(uri, body=body)
self.expected_success(201, resp['status'])
return resp, self.deserialize(body)
def _delete_request(self, resource, uuid):
"""
Delete specified object.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param uuid: The unique identifier of an object in UUID format.
:return: A tuple with the server response and the response body.
"""
uri = self._get_uri(resource, uuid)
resp, body = self.delete(uri)
self.expected_success(204, resp['status'])
return resp, body
def _patch_request(self, resource, uuid, patch_object):
"""
Update specified object with JSON-patch.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param uuid: The unique identifier of an object in UUID format.
:return: A tuple with the server response and the serialized patched
object.
"""
uri = self._get_uri(resource, uuid)
patch_body = json.dumps(patch_object)
resp, body = self.patch(uri, body=patch_body)
self.expected_success(200, resp['status'])
return resp, self.deserialize(body)
@handle_errors
def get_api_description(self):
"""Retrieves all versions of the Ironic API."""
return self._list_request('', permanent=True)
@handle_errors
def get_version_description(self, version='v1'):
"""
Retrieves the description of the API.
:param version: The version of the API. Default: 'v1'.
:return: Serialized description of API resources.
"""
return self._list_request(version, permanent=True)
def _put_request(self, resource, put_object):
"""
Update the specified object with a PUT request.
"""
uri = self._get_uri(resource)
put_body = json.dumps(put_object)
resp, body = self.put(uri, body=put_body)
self.expected_success(202, resp['status'])
return resp, body

View File

@ -1,373 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.baremetal import base
class BaremetalClientJSON(base.BaremetalClient):
"""
Base Tempest REST client for Ironic API v1.
"""
version = '1'
uri_prefix = 'v1'
@base.handle_errors
def list_nodes(self, **kwargs):
"""List all existing nodes."""
return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_chassis_nodes(self, chassis_uuid):
"""List all nodes associated with a chassis."""
return self._list_request('/chassis/%s/nodes' % chassis_uuid)
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_node_ports(self, uuid):
"""List all ports associated with the node."""
return self._list_request('/nodes/%s/ports' % uuid)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""
Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
@base.handle_errors
def show_node_by_instance_uuid(self, instance_uuid):
"""
Gets a node associated with given instance uuid.
:param instance_uuid: UUID of the instance associated with the node.
:return: Serialized node as a dictionary.
"""
uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
return self._show_request('nodes',
uuid=None,
uri=uri)
@base.handle_errors
def show_chassis(self, uuid):
"""
Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""
Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
@base.handle_errors
def show_port_by_address(self, address):
"""
Gets a specific port by address.
:param address: MAC address of the port.
:return: Serialized port as a dictionary.
"""
uri = '/ports/detail?address=%s' % address
return self._show_request('ports', uuid=None, uri=uri)
def show_driver(self, driver_name):
"""
Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id=None, **kwargs):
"""
Create a baremetal node with the specified parameters.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpus: Number of CPUs. Default: 8.
:param local_gb: Disk size. Default: 1024.
:param memory_mb: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpus': kwargs.get('cpus', 8),
'local_gb': kwargs.get('local_gb', 1024),
'memory_mb': kwargs.get('memory_mb', 4096)},
'driver': kwargs.get('driver', 'fake')}
return self._create_request('nodes', node)
@base.handle_errors
def create_chassis(self, **kwargs):
"""
Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
return self._create_request('chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
if kwargs['address'] is not None:
port['address'] = kwargs['address']
return self._create_request('ports', port)
@base.handle_errors
def delete_node(self, uuid):
"""
Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""
Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""
Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def update_node(self, uuid, **kwargs):
"""
Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpus',
'properties/local_gb',
'properties/memory_mb',
'driver',
'instance_uuid')
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""
Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""
Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
:param state: Desired power state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""
Get all driver interfaces of a specific node.
:param node_uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
@base.handle_errors
def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
"""
Set the boot device of the specified node.
:param node_uuid: The unique identifier of the node.
:param boot_device: The boot device name.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
"""
request = {'boot_device': boot_device, 'persistent': persistent}
resp, body = self._put_request('nodes/%s/management/boot_device' %
node_uuid, request)
self.expected_success(204, resp.status)
return body
@base.handle_errors
def get_node_boot_device(self, node_uuid):
"""
Get the current boot device of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_node_supported_boot_devices(self, node_uuid):
"""
Get the supported boot devices of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device/supported' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_console(self, node_uuid):
"""
Get connection information about the console.
:param node_uuid: Unique identifier of the node in UUID format.
"""
resp, body = self._show_request('nodes/states/console', node_uuid)
self.expected_success(200, resp.status)
return resp, body
@base.handle_errors
def set_console_mode(self, node_uuid, enabled):
"""
Start and stop the node console.
:param node_uuid: Unique identifier of the node in UUID format.
:param enabled: Boolean value; whether to enable or disable the
console.
"""
enabled = {'enabled': enabled}
resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
enabled)
self.expected_success(202, resp.status)
return resp, body
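# --- Illustrative sketch, not part of the deleted module above ---
# A minimal example of driving the v1 client defined above: enroll a chassis
# and a node, attach a port, then request a power state change. How the client
# itself is constructed (auth provider and service/region settings inherited
# from service_client.ServiceClient) is left out, and the exact target string
# for the power call ('power on' per the Ironic states API) is an assumption;
# only the method names and signatures come from this file.
def enroll_and_power_on(client, port_uuid, port_mac):
    _, chassis = client.create_chassis(description='demo-chassis')
    _, node = client.create_node(chassis['uuid'], cpus=4, memory_mb=2048,
                                 driver='fake')
    client.create_port(node['uuid'], uuid=port_uuid, address=port_mac)
    return client.set_node_power_state(node['uuid'], 'power on')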

View File

@ -1,216 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import types
import boto
import boto.ec2
import boto.s3.connection
from six.moves import configparser as ConfigParser
from six.moves.urllib import parse as urlparse
from tempest_lib import exceptions as lib_exc
from tempest import config
CONF = config.CONF
class BotoClientBase(object):
ALLOWED_METHODS = set()
def __init__(self, identity_client):
self.identity_client = identity_client
self.ca_cert = CONF.identity.ca_certificates_file
self.connection_timeout = str(CONF.boto.http_socket_timeout)
self.num_retries = str(CONF.boto.num_retries)
self.build_timeout = CONF.boto.build_timeout
self.connection_data = {}
def _config_boto_timeout(self, timeout, retries):
try:
boto.config.add_section("Boto")
except ConfigParser.DuplicateSectionError:
pass
boto.config.set("Boto", "http_socket_timeout", timeout)
boto.config.set("Boto", "num_retries", retries)
def _config_boto_ca_certificates_file(self, ca_cert):
if ca_cert is None:
return
try:
boto.config.add_section("Boto")
except ConfigParser.DuplicateSectionError:
pass
boto.config.set("Boto", "ca_certificates_file", ca_cert)
def __getattr__(self, name):
"""Automatically creates methods for the allowed methods set."""
if name in self.ALLOWED_METHODS:
def func(self, *args, **kwargs):
with contextlib.closing(self.get_connection()) as conn:
return getattr(conn, name)(*args, **kwargs)
func.__name__ = name
setattr(self, name, types.MethodType(func, self, self.__class__))
setattr(self.__class__, name,
types.MethodType(func, None, self.__class__))
return getattr(self, name)
else:
raise AttributeError(name)
def get_connection(self):
self._config_boto_timeout(self.connection_timeout, self.num_retries)
self._config_boto_ca_certificates_file(self.ca_cert)
ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
'aws_secret_access_key': CONF.boto.aws_secret}
if not all(ec2_client_args.values()):
ec2_client_args = self.get_aws_credentials(self.identity_client)
self.connection_data.update(ec2_client_args)
return self.connect_method(**self.connection_data)
def get_aws_credentials(self, identity_client):
"""
Obtain existing AWS credentials, or create new ones
:param identity_client: identity client with embedded credentials
:return: EC2 credentials
"""
ec2_cred_list = identity_client.list_user_ec2_credentials(
identity_client.user_id)
for cred in ec2_cred_list:
if cred['tenant_id'] == identity_client.tenant_id:
ec2_cred = cred
break
else:
ec2_cred = identity_client.create_user_ec2_credentials(
identity_client.user_id, identity_client.tenant_id)
if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
raise lib_exc.NotFound("Unable to get access and secret keys")
else:
ec2_cred_aws = {}
ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
return ec2_cred_aws
class APIClientEC2(BotoClientBase):
def connect_method(self, *args, **kwargs):
return boto.connect_ec2(*args, **kwargs)
def __init__(self, identity_client):
super(APIClientEC2, self).__init__(identity_client)
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
purl = urlparse.urlparse(CONF.boto.ec2_url)
region_name = CONF.compute.region
if not region_name:
region_name = CONF.identity.region
region = boto.ec2.regioninfo.RegionInfo(name=region_name,
endpoint=purl.hostname)
port = purl.port
if port is None:
if purl.scheme != "https":
port = 80
else:
port = 443
else:
port = int(port)
self.connection_data.update({"is_secure": purl.scheme == "https",
"validate_certs": not insecure_ssl,
"region": region,
"host": purl.hostname,
"port": port,
"path": purl.path})
ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
'delete_key_pair', 'import_key_pair',
'get_all_key_pairs',
'get_all_tags',
'create_image', 'get_image',
'register_image', 'deregister_image',
'get_all_images', 'get_image_attribute',
'modify_image_attribute', 'reset_image_attribute',
'get_all_kernels',
'create_volume', 'delete_volume',
'get_all_volume_status', 'get_all_volumes',
'get_volume_attribute', 'modify_volume_attribute',
'bundle_instance', 'cancel_spot_instance_requests',
'confirm_product_instance',
'get_all_instance_status', 'get_all_instances',
'get_all_reserved_instances',
'get_all_spot_instance_requests',
'get_instance_attribute', 'monitor_instance',
'monitor_instances', 'unmonitor_instance',
'unmonitor_instances',
'purchase_reserved_instance_offering',
'reboot_instances', 'request_spot_instances',
'reset_instance_attribute', 'run_instances',
'start_instances', 'stop_instances',
'terminate_instances',
'attach_network_interface', 'attach_volume',
'detach_network_interface', 'detach_volume',
'get_console_output',
'delete_network_interface', 'create_subnet',
'create_network_interface', 'delete_subnet',
'get_all_network_interfaces',
'allocate_address', 'associate_address',
'disassociate_address', 'get_all_addresses',
'release_address',
'create_snapshot', 'delete_snapshot',
'get_all_snapshots', 'get_snapshot_attribute',
'modify_snapshot_attribute',
'reset_snapshot_attribute', 'trim_snapshots',
'get_all_regions', 'get_all_zones',
'get_all_security_groups', 'create_security_group',
'delete_security_group', 'authorize_security_group',
'authorize_security_group_egress',
'revoke_security_group',
'revoke_security_group_egress'))
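# --- Illustrative sketch, not part of the deleted module above ---
# How the dynamic dispatch in BotoClientBase.__getattr__ plays out for this
# EC2 client: names listed in ALLOWED_METHODS are materialized on first access
# as thin wrappers that open a boto connection, forward the call, and close
# the connection again. The identity_client argument is an assumption standing
# in for a keystone client that can supply EC2 credentials.
def list_ec2_instances(identity_client):
    ec2 = APIClientEC2(identity_client)
    # 'get_all_instances' is whitelisted above, so __getattr__ generates a
    # method that proxies to boto.connect_ec2(...).get_all_instances().
    reservations = ec2.get_all_instances()
    return [instance for r in reservations for instance in r.instances]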
class ObjectClientS3(BotoClientBase):
def connect_method(self, *args, **kwargs):
return boto.connect_s3(*args, **kwargs)
def __init__(self, identity_client):
super(ObjectClientS3, self).__init__(identity_client)
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
purl = urlparse.urlparse(CONF.boto.s3_url)
port = purl.port
if port is None:
if purl.scheme != "https":
port = 80
else:
port = 443
else:
port = int(port)
self.connection_data.update({"is_secure": purl.scheme == "https",
"validate_certs": not insecure_ssl,
"host": purl.hostname,
"port": port,
"calling_format": boto.s3.connection.
OrdinaryCallingFormat()})
ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
'get_all_buckets', 'get_bucket', 'delete_key',
'lookup'))
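# --- Illustrative sketch, not part of the deleted module above ---
# The S3 client follows the same pattern: only names in its ALLOWED_METHODS
# are proxied through a boto connection. Everything past create_bucket() below
# is plain boto Bucket/Key usage, not something defined in this file, and the
# identity_client argument is again an assumed keystone client.
def upload_object(identity_client, bucket_name, key_name, data):
    s3 = ObjectClientS3(identity_client)
    bucket = s3.create_bucket(bucket_name)      # whitelisted above
    key = bucket.new_key(key_name)              # standard boto Bucket API
    key.set_contents_from_string(data)          # standard boto Key API
    return s3.get_all_buckets()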

Some files were not shown because too many files have changed in this diff.