Merge "Remove intree tempest tests"

This commit is contained in:
Zuul 2018-02-06 07:29:15 +00:00 committed by Gerrit Code Review
commit a52cfbdbad
35 changed files with 0 additions and 4501 deletions

View File

@ -1,37 +0,0 @@
# Copyright 2015 Intel Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from tempest import config # noqa
# Tempest option groups contributed by the Congress plugin.

# Toggle announcing whether the Congress service itself is deployed.
service_available_group = cfg.OptGroup(
    name="service_available",
    title="Available OpenStack Services")

ServiceAvailableGroup = [
    cfg.BoolOpt('congress',
                default=True,
                help="Whether or not Congress is expected to be available"),
]

# Options consumed by the Congress high-availability scenario tests.
congressha_group = cfg.OptGroup(
    name="congressha",
    title="Congress HA Options")

CongressHAGroup = [
    cfg.StrOpt("replica_type",
               default="policyha",
               help="service type used to create a replica congress server."),
    cfg.IntOpt("replica_port",
               default=4001,
               help="The listening port for a replica congress server. "),
]

View File

@ -1,46 +0,0 @@
# Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
from congress_tempest_tests import config as config_congress
class CongressTempestPlugin(plugins.TempestPlugin):
    """Tempest plugin entry point for the Congress test suite."""

    def load_tests(self):
        # Package root is the parent of the directory holding this module;
        # the discoverable tests live under congress_tempest_tests/tests.
        here = os.path.dirname(os.path.abspath(__file__))
        base_path = os.path.split(here)[0]
        full_test_dir = os.path.join(base_path, "congress_tempest_tests/tests")
        return full_test_dir, base_path

    def register_opts(self, conf):
        # Register both Congress option groups with the tempest config object.
        for group, opts in (
                (config_congress.service_available_group,
                 config_congress.ServiceAvailableGroup),
                (config_congress.congressha_group,
                 config_congress.CongressHAGroup)):
            config.register_opt_group(conf, group, opts)

    def get_opt_lists(self):
        # Expose the (group name, options) pairs for config generation.
        return [
            (config_congress.congressha_group.name,
             config_congress.CongressHAGroup),
            (config_congress.service_available_group.name,
             config_congress.ServiceAvailableGroup)
        ]

View File

@ -1,68 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.network import base
class QosPoliciesClient(base.BaseNetworkClient):
    """REST client for the OpenStack Networking (Neutron) QoS policy API."""

    def create_qos_policy(self, **kwargs):
        """Creates an OpenStack Networking qos_policy.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies'
        post_data = {'policy': kwargs}
        return self.create_resource(uri, post_data)

    def update_qos_policy(self, qos_policy_id, **kwargs):
        """Updates a qos policy.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s' % qos_policy_id
        post_data = {'policy': kwargs}
        return self.update_resource(uri, post_data)

    # Backward-compatible alias: the update method was originally named
    # 'update_security_group' (an apparent copy/paste slip from the
    # security-group client) even though it updates a QoS policy. The old
    # name is kept so any existing callers continue to work.
    update_security_group = update_qos_policy

    def show_qos_policy(self, qos_policy_id, **fields):
        """Shows details for a qos policy.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s' % qos_policy_id
        return self.show_resource(uri, **fields)

    def delete_qos_policy(self, qos_policy_id):
        """Deletes an OpenStack Networking qos policy.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s' % qos_policy_id
        return self.delete_resource(uri)

    def list_qos_policy(self, **filters):
        """Lists OpenStack Networking qos policies.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies'
        return self.list_resources(uri, **filters)

View File

@ -1,73 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.network import base
class QosRuleClient(base.BaseNetworkClient):
    """REST client for the OpenStack Networking (Neutron) QoS rule API.

    *qos_rule_type* is the plural resource name used in the URL (e.g.
    'bandwidth_limit_rules'); request bodies are keyed by its singular form.
    """

    def create_qos_rule(self, qos_policy_id, qos_rule_type, **kwargs):
        """Creates an OpenStack Networking qos rule.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s/%s' % (qos_policy_id, qos_rule_type)
        # Body key is the singular of the rule type, e.g.
        # 'bandwidth_limit_rules' -> 'bandwidth_limit_rule'.
        post_data = {qos_rule_type[:-1]: kwargs}
        return self.create_resource(uri, post_data)

    def update_qos_rule(self, qos_policy_id, qos_rule_type,
                        qos_rule_id, **kwargs):
        """Updates a qos rule.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s/%s/%s' % (qos_policy_id,
                                          qos_rule_type, qos_rule_id)
        # Bug fix: the body key was hard-coded to the plural
        # 'bandwidth_limit_rules', which does not match the singular key the
        # Neutron API expects and broke updates of any other rule type.
        # Derive the singular key from qos_rule_type, exactly as
        # create_qos_rule does.
        post_data = {qos_rule_type[:-1]: kwargs}
        return self.update_resource(uri, post_data)

    def show_qos_rule(self, qos_policy_id, qos_rule_type,
                      qos_rule_id, **fields):
        """Shows details for a qos rule.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s/%s/%s' % (qos_policy_id,
                                          qos_rule_type, qos_rule_id)
        return self.show_resource(uri, **fields)

    def delete_qos_rule(self, qos_policy_id, qos_rule_type, qos_rule_id):
        """Deletes an OpenStack Networking qos rule.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s/%s/%s' % (qos_policy_id,
                                          qos_rule_type, qos_rule_id)
        return self.delete_resource(uri)

    def list_qos_rule(self, qos_policy_id, **filters):
        """Lists OpenStack Networking qos rules of one policy.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/network/v2/index.html#quality-of-service
        """
        uri = '/qos/policies/%s' % (qos_policy_id)
        return self.list_resources(uri, **filters)

View File

@ -1,215 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class PolicyClient(rest_client.RestClient):
    """REST client for the Congress v1 API.

    Thin wrappers over the Congress endpoints for policies, policy rules,
    library policies, data sources and datasource drivers. Each method
    returns a ``rest_client.ResponseBody`` built by :meth:`_resp_helper`.
    """

    # URL templates for the Congress v1 endpoints.
    policy = '/v1/policies'
    policy_path = '/v1/policies/%s'
    policy_rules = '/v1/policies/%s/rules'
    policy_rules_path = '/v1/policies/%s/rules/%s'
    policy_tables = '/v1/policies/%s/tables'
    policy_table_path = '/v1/policies/%s/tables/%s'
    policy_rows = '/v1/policies/%s/tables/%s/rows'
    policy_rows_trace = '/v1/policies/%s/tables/%s/rows?trace=True'
    policies = '/v1/policies'
    policies_status = '/v1/policies/%s/status'
    policy_action = '/v1/policies/%s?%s'
    library_policy = '/v1/librarypolicies'
    library_policy_path = '/v1/librarypolicies/%s'
    library_policies = '/v1/librarypolicies'
    datasources = '/v1/data-sources'
    datasource_path = '/v1/data-sources/%s'
    datasource_tables = '/v1/data-sources/%s/tables'
    datasource_table_path = '/v1/data-sources/%s/tables/%s'
    datasource_status = '/v1/data-sources/%s/status'
    datasource_schema = '/v1/data-sources/%s/schema'
    datasource_table_schema = '/v1/data-sources/%s/tables/%s/spec'
    datasource_rows = '/v1/data-sources/%s/tables/%s/rows'
    driver = '/v1/system/drivers'
    driver_path = '/v1/system/drivers/%s'

    def _add_params_to_url(self, url, params):
        """Append *params* to *url* as a query string.

        Bug fix: the original joined *every* parameter with '?', producing
        malformed URLs such as '/path?a=1?b=2' when more than one parameter
        was supplied; parameters after the first must be joined with '&'.
        """
        if not params:
            return url
        query = '&'.join('{param_name}={param_value}'.format(
            param_name=key, param_value=params[key]) for key in params)
        return '%s?%s' % (url, query)

    def _resp_helper(self, resp, body=None):
        """Decode a JSON *body* (when present) and wrap it with *resp*."""
        if body:
            body = json.loads(body)
        return rest_client.ResponseBody(resp, body)

    def create_policy(self, body, params=None):
        """Create a policy; *params* become URL query parameters."""
        if params is None:
            params = {}
        body = json.dumps(body)
        resp, body = self.post(
            self._add_params_to_url(self.policy, params), body=body)
        return self._resp_helper(resp, body)

    def delete_policy(self, policy):
        """Delete the policy identified by *policy*."""
        resp, body = self.delete(
            self.policy_path % policy)
        return self._resp_helper(resp, body)

    def show_policy(self, policy):
        """Show details of the policy identified by *policy*."""
        resp, body = self.get(
            self.policy_path % policy)
        return self._resp_helper(resp, body)

    def create_library_policy(self, body):
        """Create a library policy from *body*."""
        body = json.dumps(body)
        resp, body = self.post(
            self.library_policy, body=body)
        return self._resp_helper(resp, body)

    def delete_library_policy(self, policy):
        """Delete the library policy identified by *policy*."""
        resp, body = self.delete(
            self.library_policy_path % policy)
        return self._resp_helper(resp, body)

    def show_library_policy(self, policy):
        """Show details of the library policy identified by *policy*."""
        resp, body = self.get(
            self.library_policy_path % policy)
        return self._resp_helper(resp, body)

    def create_policy_rule(self, policy_name, body=None):
        """Create a rule in policy *policy_name*."""
        body = json.dumps(body)
        resp, body = self.post(
            self.policy_rules % policy_name, body=body)
        return self._resp_helper(resp, body)

    def delete_policy_rule(self, policy_name, rule_id):
        """Delete rule *rule_id* from policy *policy_name*."""
        resp, body = self.delete(
            self.policy_rules_path % (policy_name, rule_id))
        return self._resp_helper(resp, body)

    def show_policy_rule(self, policy_name, rule_id):
        """Show rule *rule_id* of policy *policy_name*."""
        resp, body = self.get(
            self.policy_rules_path % (policy_name, rule_id))
        return self._resp_helper(resp, body)

    def list_policy_rows(self, policy_name, table, trace=None):
        """List rows of *table*; with *trace*, request an execution trace."""
        if trace:
            query = self.policy_rows_trace
        else:
            query = self.policy_rows
        resp, body = self.get(query % (policy_name, table))
        return self._resp_helper(resp, body)

    def list_policy_rules(self, policy_name):
        """List all rules of policy *policy_name*."""
        resp, body = self.get(self.policy_rules % (policy_name))
        return self._resp_helper(resp, body)

    def list_policy(self):
        """List all policies."""
        resp, body = self.get(self.policies)
        return self._resp_helper(resp, body)

    def list_library_policy(self):
        """List all library policies."""
        resp, body = self.get(self.library_policies)
        return self._resp_helper(resp, body)

    def list_policy_tables(self, policy_name):
        """List the tables of policy *policy_name*."""
        resp, body = self.get(self.policy_tables % (policy_name))
        return self._resp_helper(resp, body)

    def list_policy_status(self, policy_name):
        """Show the status of policy *policy_name*."""
        resp, body = self.get(self.policies_status % (policy_name))
        return self._resp_helper(resp, body)

    def execute_policy_action(self, policy_name, action, trace, delta, body):
        """Execute *action* on a policy with trace/delta query flags."""
        body = json.dumps(body)
        uri = "?action=%s&trace=%s&delta=%s" % (action, trace, delta)
        resp, body = self.post(
            (self.policy_path % policy_name) + str(uri), body=body)
        return self._resp_helper(resp, body)

    def show_policy_table(self, policy_name, table_id):
        """Show table *table_id* of policy *policy_name*."""
        resp, body = self.get(self.policy_table_path % (policy_name, table_id))
        return self._resp_helper(resp, body)

    def list_datasources(self):
        """List all configured data sources."""
        resp, body = self.get(self.datasources)
        return self._resp_helper(resp, body)

    def list_datasource_tables(self, datasource_name):
        """List the tables of data source *datasource_name*."""
        resp, body = self.get(self.datasource_tables % (datasource_name))
        return self._resp_helper(resp, body)

    def list_datasource_rows(self, datasource_name, table_name):
        """List the rows of one data source table."""
        resp, body = self.get(self.datasource_rows %
                              (datasource_name, table_name))
        return self._resp_helper(resp, body)

    def list_datasource_status(self, datasource_name):
        """Show the status of data source *datasource_name*."""
        resp, body = self.get(self.datasource_status % datasource_name)
        return self._resp_helper(resp, body)

    def show_datasource_schema(self, datasource_name):
        """Show the full schema of data source *datasource_name*."""
        resp, body = self.get(self.datasource_schema % datasource_name)
        return self._resp_helper(resp, body)

    def show_datasource_table_schema(self, datasource_name, table_name):
        """Show the schema of one data source table."""
        resp, body = self.get(self.datasource_table_schema %
                              (datasource_name, table_name))
        return self._resp_helper(resp, body)

    def show_datasource_table(self, datasource_name, table_id):
        """Show one table of a data source."""
        resp, body = self.get(self.datasource_table_path %
                              (datasource_name, table_id))
        return self._resp_helper(resp, body)

    def create_datasource(self, body=None):
        """Create a data source from *body*."""
        body = json.dumps(body)
        resp, body = self.post(
            self.datasources, body=body)
        return self._resp_helper(resp, body)

    def delete_datasource(self, datasource):
        """Delete the data source identified by *datasource*."""
        resp, body = self.delete(
            self.datasource_path % datasource)
        return self._resp_helper(resp, body)

    def update_datasource_row(self, datasource_name, table_id, rows):
        """Replace the rows of a (push-type) data source table.

        NOTE(review): the response body is intentionally not passed to
        _resp_helper here, matching the original behavior; the PUT result
        body is discarded.
        """
        body = json.dumps(rows)
        resp, body = self.put(
            self.datasource_rows % (datasource_name, table_id), body)
        return self._resp_helper(resp)

    def execute_datasource_action(self, service_name, action, body):
        """Execute *action* on data source *service_name*."""
        body = json.dumps(body)
        uri = "?action=%s" % (action)
        resp, body = self.post(
            (self.datasource_path % service_name) + str(uri), body=body)
        return self._resp_helper(resp, body)

    def list_drivers(self):
        """List the available datasource drivers."""
        resp, body = self.get(self.driver)
        return self._resp_helper(resp, body)

    def show_driver(self, driver):
        """Show details of datasource driver *driver*."""
        resp, body = self.get(self.driver_path % (driver))
        return self._resp_helper(resp, body)

    def request_refresh(self, driver, body=None):
        """Ask the data source *driver* to refresh (re-poll) its data."""
        body = json.dumps(body)
        resp, body = self.post(self.datasource_path %
                               (driver) + "?action=request-refresh",
                               body=body)
        return self._resp_helper(resp, body)

View File

@ -1,25 +0,0 @@
====================
Tempest Integration
====================
This directory contains Tempest tests that cover the Congress project.
To list all Congress tempest cases, go to tempest directory, then run::
$ testr list-tests congress
To run only these tests in tempest, go to tempest directory, then run::
$ ./run_tempest.sh -N -- congress
To run a single test case, go to tempest directory, then run with test case name, e.g.::
$ ./run_tempest.sh -N -- congress_tempest_tests.tests.scenario.test_congress_basic_ops.TestPolicyBasicOps.test_policy_basic_op
Alternatively, to run congress tempest plugin tests using tox, go to tempest directory, then run::
$ tox -eall-plugin congress
And, to run a specific test::
$ tox -eall-plugin congress_tempest_tests.tests.scenario.test_congress_basic_ops.TestPolicyBasicOps.test_policy_basic_op

View File

@ -1,171 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase):
    """Scenario tests for the Congress 'keystone' datasource driver.

    Each test reads a Congress datasource table and polls until its rows
    match the data fetched directly from keystone.
    """

    @classmethod
    def skip_checks(cls):
        super(TestKeystoneV2Driver, cls).skip_checks()
        # The scenario needs network reachability to the service APIs.
        if not (CONF.network.project_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def setUp(self):
        super(TestKeystoneV2Driver, self).setUp()
        # Build clients from the admin credentials; the checks below read
        # users/roles/tenants through these.
        self.os_primary = clients.Manager(
            self.os_admin.auth_provider.credentials)
        self.keystone = self.os_primary.identity_client
        self.tenants_client = self.os_primary.tenants_client
        self.roles_client = self.os_primary.roles_client
        self.users_client = self.os_primary.users_client
        # Congress id of the 'keystone' datasource under test.
        self.datasource_id = manager_congress.get_datasource_id(
            self.os_admin.congress_client, 'keystone')

    @decorators.attr(type='smoke')
    def test_keystone_users_table(self):
        """Check the Congress 'users' table converges to keystone's users."""
        user_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'users')['columns'])
        # Index of the 'id' column in the congress table schema.
        user_id_col = next(i for i, c in enumerate(user_schema)
                           if c['name'] == 'id')

        def _check_data_table_keystone_users():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            users = self.users_client.list_users()['users']
            user_map = {}
            for user in users:
                user_map[user['id']] = user
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'users'))
            # Every congress row must match the keystone record column by
            # column; any mismatch means the tables have not converged yet.
            for row in results['results']:
                try:
                    user_row = user_map[row['data'][user_id_col]]
                except KeyError:
                    return False
                for index in range(len(user_schema)):
                    if ((user_schema[index]['name'] == 'tenantId' and
                         'tenantId' not in user_row) or
                        (user_schema[index]['name'] == 'email' and
                         'email' not in user_row)):
                        # Keystone does not return the tenantId or email column
                        # if not present.
                        pass
                    elif (str(row['data'][index]) !=
                            str(user_row[user_schema[index]['name']])):
                        return False
            return True
        if not test_utils.call_until_true(
                func=_check_data_table_keystone_users,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_keystone_roles_table(self):
        """Check the Congress 'roles' table converges to keystone's roles."""
        role_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'roles')['columns'])
        # Index of the 'id' column in the congress table schema.
        role_id_col = next(i for i, c in enumerate(role_schema)
                           if c['name'] == 'id')

        def _check_data_table_keystone_roles():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            roles = self.roles_client.list_roles()['roles']
            roles_map = {}
            for role in roles:
                roles_map[role['id']] = role
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'roles'))
            for row in results['results']:
                try:
                    role_row = roles_map[row['data'][role_id_col]]
                except KeyError:
                    return False
                for index in range(len(role_schema)):
                    if (str(row['data'][index]) !=
                            str(role_row[role_schema[index]['name']])):
                        return False
            return True
        if not test_utils.call_until_true(
                func=_check_data_table_keystone_roles,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_keystone_tenants_table(self):
        """Check the Congress 'tenants' table converges to keystone."""
        tenant_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'tenants')['columns'])
        # Index of the 'id' column in the congress table schema.
        tenant_id_col = next(i for i, c in enumerate(tenant_schema)
                             if c['name'] == 'id')

        def _check_data_table_keystone_tenants():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            tenants = self.tenants_client.list_tenants()['tenants']
            tenants_map = {}
            for tenant in tenants:
                tenants_map[tenant['id']] = tenant
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'tenants'))
            for row in results['results']:
                try:
                    tenant_row = tenants_map[row['data'][tenant_id_col]]
                except KeyError:
                    return False
                for index in range(len(tenant_schema)):
                    if (str(row['data'][index]) !=
                            str(tenant_row[tenant_schema[index]['name']])):
                        return False
            return True
        if not test_utils.call_until_true(
                func=_check_data_table_keystone_tenants,
                duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Check the keystone datasource polls without recording an error."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('keystone'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,106 +0,0 @@
# Copyright 2016 NEC Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestAodhDriver(manager_congress.ScenarioPolicyBase):
    """Scenario tests for the Congress 'aodh' datasource driver."""

    @classmethod
    def skip_checks(cls):
        super(TestAodhDriver, cls).skip_checks()
        if not getattr(CONF.service_available, 'aodh_plugin', False):
            # Bug fix: 'cls' is already the class object, so
            # cls.__class__.__name__ named the *metaclass* (e.g. 'type'),
            # not this test class; use cls.__name__ in the skip message.
            msg = ("%s skipped as aodh is not available" % cls.__name__)
            raise cls.skipException(msg)

    def setUp(self):
        super(TestAodhDriver, self).setUp()
        self.alarms_client = self.os_admin.alarms_client
        # Congress id of the 'aodh' datasource under test.
        self.datasource_id = manager_congress.get_datasource_id(
            self.os_admin.congress_client, 'aodh')

    @decorators.attr(type='smoke')
    def test_aodh_alarms_table(self):
        """Check the Congress 'alarms' table converges to aodh's alarms."""
        # Add test alarm
        rule = {'meter_name': 'cpu_util',
                'comparison_operator': 'gt',
                'threshold': 80.0,
                'period': 70}
        self.alarms_client.create_alarm(name='test-alarm',
                                        type='threshold',
                                        enabled=False,
                                        threshold_rule=rule)
        alarms_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'alarms')['columns'])
        # Index of the 'alarm_id' column in the congress table schema.
        alarms_id_col = next(i for i, c in enumerate(alarms_schema)
                             if c['name'] == 'alarm_id')

        def _check_data_table_aodh_alarms():
            # Fetch data from aodh each time, because this test may start
            # before aodh has all the users.
            alarms = self.alarms_client.list_alarms()
            alarm_map = {}
            for alarm in alarms:
                alarm_map[alarm['alarm_id']] = alarm
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'alarms'))
            # Sub-table holding the per-alarm threshold-rule key/value rows.
            rule_data = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'alarms.threshold_rule'))['results']
            for row in results['results']:
                try:
                    alarm_row = alarm_map[row['data'][alarms_id_col]]
                except KeyError:
                    return False
                for index in range(len(alarms_schema)):
                    if alarms_schema[index]['name'] == 'threshold_rule_id':
                        # Compare the flattened threshold-rule rows against
                        # the nested dict aodh returns for this alarm.
                        threshold_rule = alarm_row['threshold_rule']
                        data = [r['data'] for r in rule_data
                                if r['data'][0] == row['data'][index]]
                        # Renamed the ambiguous loop variable 'l' (E741).
                        for rule_row in data:
                            if str(threshold_rule[rule_row[1]]) != \
                                    str(rule_row[2]):
                                return False
                        continue
                    if (str(row['data'][index]) !=
                            str(alarm_row[alarms_schema[index]['name']])):
                        return False
            return True
        if not test_utils.call_until_true(func=_check_data_table_aodh_alarms,
                                          duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Check the aodh datasource polls without recording an error."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('aodh'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,128 +0,0 @@
# Copyright 2017 Orange. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"Tempest tests for config datasource"
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
def find_col(schema, name):
    """Return the index of the column called *name* in a congress schema.

    Raises StopIteration (via next()) when no column matches, exactly like
    the original generator-based lookup.
    """
    matches = (idx for idx, col in enumerate(schema) if col['name'] == name)
    return next(matches)
class TestCfgValidatorDriver(manager_congress.ScenarioPolicyBase):
    """Tempest tests for the config datasource.

    Checks that the datasource is available and test it on congress
    configuration files.
    """

    def setUp(self):
        super(TestCfgValidatorDriver, self).setUp()
        self.keypairs = {}
        self.servers = []
        # Locate the 'config' datasource; skip the whole class if absent.
        datasources = self.os_admin.congress_client.list_datasources()
        for datasource in datasources['results']:
            if datasource['name'] == 'config':
                self.datasource_id = datasource['id']
                return
        self.skipTest('no datasource config configured.')

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        "Test that config datasource is correctly launched."
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('config'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

    @decorators.attr(type='smoke')
    def test_metadata_sent(self):
        "Test that metadata on congress options are sent."
        client = self.os_admin.congress_client
        # Column indexes in the 'option' table.
        schema1 = (
            client.show_datasource_table_schema(
                self.datasource_id, 'option')['columns'])
        col1_name = find_col(schema1, 'name')
        col1_group = find_col(schema1, 'group')
        col1_namespace = find_col(schema1, 'namespace')
        # Column indexes in the 'namespace' table.
        schema2 = (
            client.show_datasource_table_schema(
                self.datasource_id, 'namespace')['columns'])
        col2_name = find_col(schema2, 'name')
        col2_id = find_col(schema2, 'id')

        def _check_metadata():
            res1 = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'option')).get('results', None)
            res2 = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'namespace')).get('results', None)
            if res1 is None or res2 is None:
                return False
            # A known option row and the 'congress' namespace row must exist.
            row1 = next((r for r in res1
                         if r['data'][col1_name] == u'datasource_sync_period'),
                        None)
            row2 = next((r for r in res2
                         if r['data'][col2_name] == u'congress'),
                        None)
            if row1 is None or row2 is None:
                return False
            if row1['data'][col1_group] != 'DEFAULT':
                return False
            # The option's namespace column must reference the namespace row.
            return row1['data'][col1_namespace] == row2['data'][col2_id]
        if not test_utils.call_until_true(
                func=_check_metadata,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_options_sent(self):
        "Test that there is at least one value for congress option."
        driver = u'congress.datasources.cfgvalidator_driver.ValidatorDriver'
        client = self.os_admin.congress_client
        schema = (
            client.show_datasource_table_schema(
                self.datasource_id, 'binding')['columns'])
        # Index of the 'val' column in the 'binding' table.
        col_value = find_col(schema, 'val')

        def _check_value():
            res = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'binding')).get('results', None)
            if res is None:
                return False
            # The validator driver path must appear among the bound values.
            row = next((r for r in res
                        if r['data'][col_value] == driver),
                       None)
            return row is not None
        if not test_utils.call_until_true(
                func=_check_value,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

View File

@ -1,116 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import clients
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestCinderDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestCinderDriver, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
def setUp(self):
super(TestCinderDriver, self).setUp()
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
self.cinder = self.os_primary.volumes_v2_client
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'cinder')
res = self.cinder.create_volume(size=1, description=None, name='v0',
consistencygroup_id=None, metadata={})
LOG.debug('result of creating new volume: %s', res)
@decorators.attr(type='smoke')
def test_cinder_volumes_table(self):
volume_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'volumes')['columns'])
volume_id_col = next(i for i, c in enumerate(volume_schema)
if c['name'] == 'id')
def _check_data_table_cinder_volumes():
# Fetch data from cinder each time, because this test may start
# before cinder has all the users.
volumes = self.cinder.list_volumes()['volumes']
LOG.debug('cinder volume list: %s', volumes)
volumes_map = {}
for volume in volumes:
volumes_map[volume['id']] = volume
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'volumes'))
LOG.debug('congress cinder volumes table: %s', results)
# check that congress and cinder return the same volume IDs
rows_volume_id_set = set()
for row in results['results']:
rows_volume_id_set.add(row['data'][volume_id_col])
if rows_volume_id_set != frozenset(volumes_map.keys()):
LOG.debug('volumes IDs mismatch')
return False
# FIXME(ekcs): the following code is broken because 'user_id'
# and 'description' fields do not appear in results provided by
# [tempest].os.volumes_client.list_volumes().
# Detailed checking disabled for now. Re-enable when fixed.
# It appears the code was written for v1 volumes client but never
# worked. The problem was not evident because the list of volumes
# was empty.
# Additional adaptation is needed for v2 volumes client.
# for row in results['results']:
# try:
# volume_row = volumes_map[row['data'][volume_id_col]]
# except KeyError:
# return False
# for index in range(len(volume_schema)):
# if (str(row['data'][index]) !=
# str(volume_row[volume_schema[index]['name']])):
# return False
return True
if not test_utils.call_until_true(
func=_check_data_table_cinder_volumes,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
    """Poll until the cinder datasource reports an error-free status."""
    def _poll_ok():
        return self.check_datasource_no_error('cinder')

    converged = test_utils.call_until_true(
        func=_poll_ok, duration=30, sleep_for=5)
    if not converged:
        raise exceptions.TimeoutException('Datasource could not poll '
                                          'without error.')

View File

@ -1,87 +0,0 @@
# Copyright 2016 NTT All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import helper
from congress_tempest_tests.tests.scenario import manager_congress
class TestDoctorDriver(manager_congress.ScenarioPolicyBase):
    """Scenario tests for the Congress 'doctor' push-type datasource.

    A doctor datasource is created per-test in setUp and removed again
    in tearDown, so each test runs against a fresh datasource.
    """

    def setUp(self):
        super(TestDoctorDriver, self).setUp()
        doctor_setting = {
            'name': 'doctor',
            'driver': 'doctor',
            'config': None,
        }
        self.client = self.os_admin.congress_client
        response = self.client.create_datasource(doctor_setting)
        self.datasource_id = response['id']

    def tearDown(self):
        super(TestDoctorDriver, self).tearDown()
        self.client.delete_datasource(self.datasource_id)

    def _list_datasource_rows(self, datasource, table):
        # Thin wrapper kept for readability of the checks below.
        return self.client.list_datasource_rows(datasource, table)

    @decorators.attr(type='smoke')
    def test_doctor_event_tables(self):
        """Push one doctor event and verify it lands in the events table."""
        rows = [
            {
                "time": "2016-02-22T11:48:55Z",
                "type": "compute.host.down",
                "details": {
                    "hostname": "compute1",
                    "status": "down",
                    "monitor": "zabbix1",
                    "monitor_event_id": "111"
                }
            }
        ]
        # The driver flattens the nested 'details' dict into columns.
        expected_row = [
            "2016-02-22T11:48:55Z",
            "compute.host.down",
            "compute1",
            "down",
            "zabbix1",
            "111"
        ]

        # Check if service is up
        @helper.retry_on_exception
        def _check_service():
            self.client.list_datasource_status(self.datasource_id)
            return True

        if not test_utils.call_until_true(func=_check_service,
                                          duration=60, sleep_for=1):
            raise exceptions.TimeoutException("Doctor dataservice is not up")

        self.client.update_datasource_row(self.datasource_id, 'events', rows)
        results = self._list_datasource_rows(self.datasource_id, 'events')
        if len(results['results']) != 1:
            error_msg = ('Unexpected additional rows are '
                         'inserted. row details: %s' % results['results'])
            raise exceptions.InvalidStructure(error_msg)

        if results['results'][0]['data'] != expected_row:
            # BUG FIX: the original formatted this message with
            # results['data'], a key that does not exist (the response dict
            # only has 'results'), so the mismatch path raised KeyError
            # instead of reporting the actual row that differed.
            msg = ('inserted row %s is not expected row %s'
                   % (results['results'][0]['data'], expected_row))
            raise exceptions.InvalidStructure(msg)

View File

@ -1,136 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestGlanceV2Driver(manager_congress.ScenarioPolicyBase):
    """Scenario tests validating the Congress glancev2 datasource.

    Each test polls until the rows cached by Congress converge to what
    the glance v2 API itself reports.
    """

    @classmethod
    def skip_checks(cls):
        super(TestGlanceV2Driver, cls).skip_checks()
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)
        if not CONF.service_available.glance:
            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
            raise cls.skipException(skip_msg)

    def setUp(self):
        super(TestGlanceV2Driver, self).setUp()
        # Re-wrap the admin credentials in a fresh clients.Manager so the
        # test owns its own glance v2 image client.
        self.os_primary = clients.Manager(
            self.os_admin.auth_provider.credentials)
        self.glancev2 = self.os_primary.image_client_v2
        self.datasource_id = manager_congress.get_datasource_id(
            self.os_admin.congress_client, 'glancev2')

    @decorators.attr(type='smoke')
    @utils.services('image')
    def test_glancev2_images_table(self):
        """Check Congress 'images' rows match glance's image list."""
        image_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'images')['columns'])
        # Index of the 'id' column within the datasource table schema.
        image_id_col = next(i for i, c in enumerate(image_schema)
                            if c['name'] == 'id')

        def _check_data_table_glancev2_images():
            # Fetch data from glance each time, because this test may start
            # before glance has all the users.
            images = self.glancev2.list_images()['images']
            image_map = {}
            for image in images:
                image_map[image['id']] = image
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'images'))
            for row in results['results']:
                try:
                    image_row = image_map[row['data'][image_id_col]]
                except KeyError:
                    # Congress has an image glance doesn't know about yet.
                    return False
                for index in range(len(image_schema)):
                    # glancev2 doesn't return kernel_id/ramdisk_id if
                    # it isn't present...
                    if ((image_schema[index]['name'] == 'kernel_id' and
                            'kernel_id' not in row['data']) or
                            (image_schema[index]['name'] == 'ramdisk_id' and
                             'ramdisk_id' not in row['data'])):
                        continue
                    # FIXME(arosen): congress-server should retain the type
                    # but doesn't today.
                    if (str(row['data'][index]) !=
                            str(image_row[image_schema[index]['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_glancev2_images,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    @utils.services('image')
    def test_glancev2_tags_table(self):
        """Check every Congress 'tags' row exists on the glance image."""
        def _check_data_table_glance_images():
            # Fetch data from glance each time, because this test may start
            # before glance has all the users.
            images = self.glancev2.list_images()['images']
            image_tag_map = {}
            for image in images:
                image_tag_map[image['id']] = image['tags']
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'tags'))
            for row in results['results']:
                image_id, tag = row['data'][0], row['data'][1]
                glance_image_tags = image_tag_map.get(image_id)
                if not glance_image_tags:
                    # congress had image that glance doesn't know about.
                    return False
                if tag not in glance_image_tags:
                    # congress had a tag that wasn't on the image.
                    return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_glance_images,
                duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Verify the glancev2 datasource polls without recording errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('glancev2'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,45 +0,0 @@
# Copyright 2017 VMware Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestHeatDriver(manager_congress.ScenarioPolicyBase):
    """Smoke test for the Congress heat datasource driver."""

    @classmethod
    def skip_checks(cls):
        super(TestHeatDriver, cls).skip_checks()
        if not getattr(CONF.service_available, 'heat_plugin', False):
            # BUG FIX: cls is already a class object, so the skip message
            # must use cls.__name__; the original cls.__class__.__name__
            # named the metaclass (e.g. 'type'), not the test class.
            msg = ("%s skipped because heat service is not configured" %
                   cls.__name__)
            raise cls.skipException(msg)

    # TODO(testing): checks on correctness of data in updates
    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Verify the heat datasource polls without recording errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('heat'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,45 +0,0 @@
# Copyright 2017 VMware Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestIronicDriver(manager_congress.ScenarioPolicyBase):
    """Smoke test for the Congress ironic datasource driver."""

    @classmethod
    def skip_checks(cls):
        super(TestIronicDriver, cls).skip_checks()
        if not getattr(CONF.service_available, 'ironic', False):
            # BUG FIX: cls is already a class object, so the skip message
            # must use cls.__name__; the original cls.__class__.__name__
            # named the metaclass (e.g. 'type'), not the test class.
            msg = ("%s skipped because ironic service is not configured" %
                   cls.__name__)
            raise cls.skipException(msg)

    # TODO(testing): checks on correctness of data in updates
    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Verify the ironic datasource polls without recording errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('ironic'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,206 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestKeystoneV3Driver(manager_congress.ScenarioPolicyBase):
    """Scenario tests validating the Congress keystonev3 datasource.

    Each table test polls until the rows cached by Congress converge to
    what the keystone v3 API itself reports.
    """

    @classmethod
    def skip_checks(cls):
        super(TestKeystoneV3Driver, cls).skip_checks()
        if not (CONF.network.project_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def setUp(self):
        super(TestKeystoneV3Driver, self).setUp()
        # Re-wrap the admin credentials in a fresh clients.Manager so the
        # test owns its own keystone v3 clients.
        self.os_primary = clients.Manager(
            self.os_admin.auth_provider.credentials)
        self.keystone = self.os_primary.identity_v3_client
        self.projects_client = self.os_primary.projects_client
        self.domains_client = self.os_primary.domains_client
        self.roles_client = self.os_primary.roles_v3_client
        self.users_client = self.os_primary.users_v3_client
        self.datasource_id = manager_congress.get_datasource_id(
            self.os_admin.congress_client, 'keystonev3')

    @decorators.attr(type='smoke')
    def test_keystone_users_table(self):
        """Check Congress 'users' rows match keystone's user list."""
        user_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'users')['columns'])
        # Index of the 'id' column within the datasource table schema.
        user_id_col = next(i for i, c in enumerate(user_schema)
                           if c['name'] == 'id')

        def _check_data_table_keystone_users():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            users = self.users_client.list_users()['users']
            user_map = {}
            for user in users:
                user_map[user['id']] = user
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'users'))
            for row in results['results']:
                try:
                    user_row = user_map[row['data'][user_id_col]]
                except KeyError:
                    # Congress has a user keystone doesn't know about yet.
                    return False
                for index in range(len(user_schema)):
                    if ((user_schema[index]['name'] == 'default_project_id' and
                            'default_project_id' not in user_row)):
                        # Keystone omits default_project_id when it is not
                        # set on the user, so skip comparing that column.
                        pass
                    elif (str(row['data'][index]) !=
                            str(user_row[user_schema[index]['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_keystone_users,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_keystone_roles_table(self):
        """Check Congress 'roles' rows match keystone's role list."""
        role_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'roles')['columns'])
        # Index of the 'id' column within the datasource table schema.
        role_id_col = next(i for i, c in enumerate(role_schema)
                           if c['name'] == 'id')

        def _check_data_table_keystone_roles():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            roles = self.roles_client.list_roles()['roles']
            roles_map = {}
            for role in roles:
                roles_map[role['id']] = role
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'roles'))
            for row in results['results']:
                try:
                    role_row = roles_map[row['data'][role_id_col]]
                except KeyError:
                    # Congress has a role keystone doesn't know about yet.
                    return False
                for index in range(len(role_schema)):
                    if (str(row['data'][index]) !=
                            str(role_row[role_schema[index]['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_keystone_roles,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_keystone_domains_table(self):
        """Check Congress 'domains' rows match keystone's domain list."""
        domains_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'domains')['columns'])
        # Index of the 'id' column within the datasource table schema.
        domain_id_col = next(i for i, c in enumerate(domains_schema)
                             if c['name'] == 'id')

        def _check_data_table_keystone_domains():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            domains = self.domains_client.list_domains()['domains']
            domains_map = {}
            for domain in domains:
                domains_map[domain['id']] = domain
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'domains'))
            for row in results['results']:
                try:
                    domain_row = domains_map[row['data'][domain_id_col]]
                except KeyError:
                    # Congress has a domain keystone doesn't know about yet.
                    return False
                for index in range(len(domains_schema)):
                    if (str(row['data'][index]) !=
                            str(domain_row[domains_schema[index]['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_keystone_domains,
                duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_keystone_projects_table(self):
        """Check Congress 'projects' rows match keystone's project list."""
        projects_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'projects')['columns'])
        # Index of the 'id' column within the datasource table schema.
        project_id_col = next(i for i, c in enumerate(projects_schema)
                              if c['name'] == 'id')

        def _check_data_table_keystone_projects():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            projects = self.projects_client.list_projects()['projects']
            projects_map = {}
            for project in projects:
                projects_map[project['id']] = project
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'projects'))
            for row in results['results']:
                try:
                    project_row = projects_map[row['data'][project_id_col]]
                except KeyError:
                    # Congress has a project keystone doesn't know about yet.
                    return False
                for index in range(len(projects_schema)):
                    if (str(row['data'][index]) !=
                            str(project_row[projects_schema[index]['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_keystone_projects,
                duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Verify the keystonev3 datasource polls without recording errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('keystonev3'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,209 +0,0 @@
# Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestMuranoDriver(manager_congress.ScenarioPolicyBase):
    """Scenario test exercising murano pre-deployment policy simulation."""

    @classmethod
    def skip_checks(cls):
        super(TestMuranoDriver, cls).skip_checks()
        if not getattr(CONF.service_available, 'murano', False):
            # BUG FIX: cls is already a class object, so the skip message
            # must use cls.__name__; the original cls.__class__.__name__
            # named the metaclass (e.g. 'type'), not the test class.
            msg = ("%s skipped as murano is not available" %
                   cls.__name__)
            raise cls.skipException(msg)
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def setUp(self):
        super(TestMuranoDriver, self).setUp()
        self.congress_client = (
            self.os_admin.congress_client)

    @decorators.attr(type='smoke')
    @utils.services('compute')
    def test_murano_predeployment(self):
        """Simulate policy over pushed murano facts.

        Builds a policy that flags pending murano environments whose
        instances request a flavor outside an allowed set, then runs two
        simulations: one with an allowed flavor (expects no error rows)
        and one with a disallowed flavor (expects a predeploy_error row).
        """

        def _delete_policy_rules(policy_name):
            # Cleanup helper: remove every rule from the policy.
            result = self.congress_client.list_policy_rules(
                policy_name)['results']
            for rule in result:
                self.congress_client.delete_policy_rule(
                    policy_name,
                    rule['id'])

        def _create_random_policy():
            # Random suffix avoids collisions across test runs.
            policy_name = "murano_%s" % ''.join(
                random.choice(string.ascii_lowercase)
                for x in range(10))
            body = {"name": policy_name}
            resp = self.congress_client.create_policy(body)
            self.addCleanup(_delete_policy_rules, resp['name'])
            return resp['name']

        def _create_datasource():
            body = {"config": {"username": CONF.auth.admin_username,
                               "tenant_name": CONF.auth.admin_project_name,
                               "password": CONF.auth.admin_password,
                               "auth_url": CONF.identity.uri},
                    "driver": "murano",
                    "name": "murano"}
            try:
                datasource = self.congress_client.create_datasource(body)['id']
                self.addCleanup(self.congress_client.delete_datasource,
                                datasource)
            except exceptions.Conflict:
                # Datasource already exists; best effort, keep using it.
                pass

        def _create_rule(policy_name, rule):
            self.congress_client.create_policy_rule(policy_name, rule)

        def _simulate_policy(policy_name, query):
            resp = self.congress_client.execute_policy_action(
                policy_name,
                "simulate",
                False,
                False,
                query)
            return resp['result']

        # Allowed-flavor whitelist rules.
        rule1 = {
            "rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
            "flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
            "equal(flavor, \"m1.medium\")"
        }
        rule2 = {
            "rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
            "flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
            "equal(flavor, \"m1.small\")"
        }
        rule3 = {
            "rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
            "flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
            "equal(flavor, \"m1.tiny\")"
        }
        # Environments still in 'pending' state.
        rule4 = {
            "rule": "murano_pending_envs(env_id) :- "
            "murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
            "murano:states(env_id, env_state),"
            "equal(env_state, \"pending\")"
        }
        # Instances belonging to an environment's applications.
        rule5 = {
            "rule": "murano_instances(env_id, instance_id) :- "
            "murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
            "murano:objects(service_id, env_id, service_type),"
            "murano:parent_types(service_id, \"io.murano.Object\"),"
            "murano:parent_types(service_id, \"io.murano.Application\"),"
            "murano:parent_types(service_id, service_type),"
            "murano:objects(instance_id, service_id, instance_type),"
            "murano:parent_types(instance_id,"
            "\"io.murano.resources.Instance\"),"
            "murano:parent_types(instance_id, \"io.murano.Object\"),"
            "murano:parent_types(instance_id, instance_type)"
        }
        rule6 = {
            "rule": "murano_instance_flavors(instance_id, flavor) :- "
            "murano:properties(instance_id, \"flavor\", flavor)"
        }
        # Error if a pending environment uses a non-whitelisted flavor.
        rule7 = {
            "rule": "predeploy_error(env_id) :- "
            "murano_pending_envs(env_id),"
            "murano_instances(env_id, instance_id),"
            "murano_instance_flavors(instance_id, flavor),"
            "not allowed_flavors(flavor)"
        }
        # Simulation with allowed flavor m1.small: expect no error rows.
        sim_query1 = {
            "query": "predeploy_error(env_id)",
            "action_policy": "action",
            "sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
            "\"io.murano.Environment\") murano:states+(\"env_uuid\", "
            "\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
            "\"service_type\") murano:parent_types+(\"service_uuid\", "
            "\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
            "\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
            "\"service_type\") murano:objects+(\"instance_uuid\", "
            "\"service_uuid\", \"service_type\") murano:objects+(\""
            "instance_uuid\", \"service_uuid\", \"instance_type\") "
            "murano:parent_types+(\"instance_uuid\", "
            "\"io.murano.resources.Instance\") murano:parent_types+(\""
            "instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
            "instance_uuid\", \"instance_type\") murano:properties+(\""
            "instance_uuid\", \"flavor\", \"m1.small\")"
        }
        # Simulation with disallowed flavor m1.large: expect an error row.
        sim_query2 = {
            "query": "predeploy_error(env_id)",
            "action_policy": "action",
            "sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
            "\"io.murano.Environment\") murano:states+(\"env_uuid\", "
            "\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
            "\"service_type\") murano:parent_types+(\"service_uuid\", "
            "\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
            "\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
            "\"service_type\") murano:objects+(\"instance_uuid\", "
            "\"service_uuid\", \"service_type\") murano:objects+(\""
            "instance_uuid\", \"service_uuid\", \"instance_type\") "
            "murano:parent_types+(\"instance_uuid\", "
            "\"io.murano.resources.Instance\") murano:parent_types+(\""
            "instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
            "instance_uuid\", \"instance_type\") murano:properties+(\""
            "instance_uuid\", \"flavor\", \"m1.large\")"
        }

        _create_datasource()
        policy_name = _create_random_policy()
        _create_rule(policy_name, rule1)
        _create_rule(policy_name, rule2)
        _create_rule(policy_name, rule3)
        _create_rule(policy_name, rule4)
        _create_rule(policy_name, rule5)
        _create_rule(policy_name, rule6)
        _create_rule(policy_name, rule7)
        result = _simulate_policy(policy_name, sim_query1)
        self.assertEmpty(result)
        result = _simulate_policy(policy_name, sim_query2)
        self.assertEqual('predeploy_error("env_uuid")', result[0])

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """Verify the murano datasource polls without recording errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('murano'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,446 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest import clients
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import helper
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestNeutronV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
    """Skip the suite unless networks are reachable and neutron exists."""
    super(TestNeutronV2Driver, cls).skip_checks()
    reachable = (CONF.network.project_networks_reachable
                 or CONF.network.public_network_id)
    if not reachable:
        cls.enabled = False
        raise cls.skipException(
            'Either project_networks_reachable must be "true", or '
            'public_network_id must be defined.')
    if not CONF.service_available.neutron:
        raise cls.skipException(
            "%s skipped as neutron is not available" % cls.__name__)
def setUp(self):
    """Build admin-credentialed neutron clients and resolve the datasource."""
    super(TestNeutronV2Driver, self).setUp()
    self.os_primary = clients.Manager(
        self.os_admin.auth_provider.credentials)
    primary = self.os_primary
    self.networks_client = primary.networks_client
    self.subnets_client = primary.subnets_client
    self.ports_client = primary.ports_client
    self.security_groups_client = primary.security_groups_client
    self.routers_client = primary.routers_client
    self.datasource_id = manager_congress.get_datasource_id(
        self.os_admin.congress_client, 'neutronv2')
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_networks_table(self):
    """Check Congress 'networks' rows converge to neutron's network list."""
    @helper.retry_on_exception
    def _check_data():
        networks = self.networks_client.list_networks()
        network_map = {}
        for network in networks['networks']:
            network_map[network['id']] = network
        client = self.os_admin.congress_client
        # Ask Congress to re-poll neutron, then give it a moment.
        client.request_refresh(self.datasource_id)
        time.sleep(1)
        network_schema = (client.show_datasource_table_schema(
            self.datasource_id, 'networks')['columns'])
        results = (client.list_datasource_rows(
            self.datasource_id, 'networks'))
        for row in results['results']:
            network_row = network_map[row['data'][0]]
            for index in range(len(network_schema)):
                # Compare as strings since Congress stringifies values.
                if (str(row['data'][index]) !=
                        str(network_row[network_schema[index]['name']])):
                    return False
        return True

    if not test_utils.call_until_true(func=_check_data,
                                      duration=200, sleep_for=10):
        raise exceptions.TimeoutException("Data did not converge in time "
                                          "or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_ports_tables(self):
    """Check the port-related Congress tables against neutron's ports.

    Validates three tables derived from the same port objects:
    'ports', 'security_group_port_bindings' and 'fixed_ips'.
    """
    port_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'ports')['columns'])

    port_sec_binding_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'security_group_port_bindings')['columns'])

    fixed_ips_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'fixed_ips')['columns'])

    @helper.retry_on_exception
    def _check_data():
        ports_from_neutron = self.ports_client.list_ports()
        port_map = {}
        for port in ports_from_neutron['ports']:
            port_map[port['id']] = port

        client = self.os_admin.congress_client
        # Ask Congress to re-poll neutron, then give it a moment.
        client.request_refresh(self.datasource_id)
        time.sleep(1)

        ports = (client.list_datasource_rows(self.datasource_id, 'ports'))
        security_group_port_bindings = (
            client.list_datasource_rows(
                self.datasource_id, 'security_group_port_bindings'))
        fixed_ips = (
            client.list_datasource_rows(self.datasource_id, 'fixed_ips'))

        # Validate ports table
        for row in ports['results']:
            port_row = port_map[row['data'][0]]
            for index in range(len(port_schema)):
                # Compare as strings since Congress stringifies values.
                if (str(row['data'][index]) !=
                        str(port_row[port_schema[index]['name']])):
                    return False

        # validate security_group_port_bindings table
        for row in security_group_port_bindings['results']:
            port_row = port_map[row['data'][0]]
            for index in range(len(port_sec_binding_schema)):
                row_index = port_sec_binding_schema[index]['name']
                # Translate port_id -> id
                if row_index == 'port_id':
                    if (str(row['data'][index]) !=
                            str(port_row['id'])):
                        return False
                elif row_index == 'security_group_id':
                    # The neutron port holds a list of group ids; the
                    # Congress table has one row per (port, group) pair.
                    if (str(row['data'][index]) not in
                            port_row['security_groups']):
                        return False

        # validate fixed_ips
        for row in fixed_ips['results']:
            port_row = port_map[row['data'][0]]
            for index in range(len(fixed_ips_schema)):
                row_index = fixed_ips_schema[index]['name']
                if row_index in ['subnet_id', 'ip_address']:
                    if not port_row['fixed_ips']:
                        continue
                    for fixed_ip in port_row['fixed_ips']:
                        if row['data'][index] == fixed_ip[row_index]:
                            break
                    else:
                        # no subnet_id/ip_address match found
                        return False
        return True

    if not test_utils.call_until_true(func=_check_data,
                                      duration=200, sleep_for=10):
        raise exceptions.TimeoutException("Data did not converge in time "
                                          "or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_subnets_tables(self):
    """Check the subnet-related Congress tables against neutron's subnets.

    Validates four tables derived from the same subnet objects:
    'subnets', 'host_routes', 'dns_nameservers' and 'allocation_pools'.
    """
    subnet_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'subnets')['columns'])

    host_routes_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'host_routes')['columns'])

    dns_nameservers_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'dns_nameservers')['columns'])

    allocation_pools_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'allocation_pools')['columns'])

    @helper.retry_on_exception
    def _check_data():
        subnets_from_neutron = self.subnets_client.list_subnets()
        subnet_map = {}
        for subnet in subnets_from_neutron['subnets']:
            subnet_map[subnet['id']] = subnet

        client = self.os_admin.congress_client
        # Ask Congress to re-poll neutron, then give it a moment.
        client.request_refresh(self.datasource_id)
        time.sleep(1)

        subnets = (
            client.list_datasource_rows(self.datasource_id, 'subnets'))
        host_routes = (
            client.list_datasource_rows(self.datasource_id, 'host_routes'))
        dns_nameservers = (
            client.list_datasource_rows(
                self.datasource_id, 'dns_nameservers'))
        allocation_pools = (
            client.list_datasource_rows(
                self.datasource_id, 'allocation_pools'))

        # Validate subnets table
        for row in subnets['results']:
            subnet_row = subnet_map[row['data'][0]]
            for index in range(len(subnet_schema)):
                # Compare as strings since Congress stringifies values.
                if (str(row['data'][index]) !=
                        str(subnet_row[subnet_schema[index]['name']])):
                    return False

        # validate dns_nameservers
        for row in dns_nameservers['results']:
            subnet_row = subnet_map[row['data'][0]]
            for index in range(len(dns_nameservers_schema)):
                row_index = dns_nameservers_schema[index]['name']
                if row_index in ['dns_nameserver']:
                    if (row['data'][index]
                            not in subnet_row['dns_nameservers']):
                        return False

        # validate host_routes
        for row in host_routes['results']:
            subnet_row = subnet_map[row['data'][0]]
            for index in range(len(host_routes_schema)):
                row_index = host_routes_schema[index]['name']
                if row_index in ['destination', 'nexthop']:
                    if not subnet_row['host_routes']:
                        continue
                    for host_route in subnet_row['host_routes']:
                        if row['data'][index] == host_route[row_index]:
                            break
                    else:
                        # no destination/nexthop match found
                        return False

        # validate allocation_pools
        for row in allocation_pools['results']:
            subnet_row = subnet_map[row['data'][0]]
            for index in range(len(allocation_pools_schema)):
                row_index = allocation_pools_schema[index]['name']
                if row_index in ['start', 'end']:
                    if not subnet_row['allocation_pools']:
                        continue
                    for allocation_pool in subnet_row['allocation_pools']:
                        if (row['data'][index] ==
                                allocation_pool[row_index]):
                            break
                    else:
                        # no start/end match found
                        return False
        return True

    if not test_utils.call_until_true(func=_check_data,
                                      duration=200, sleep_for=10):
        raise exceptions.TimeoutException("Data did not converge in time "
                                          "or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_routers_tables(self):
    """Check the router-related Congress tables against neutron's routers.

    Validates the 'routers' table and the derived
    'external_gateway_infos' table.
    """
    router_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'routers')['columns'])

    ext_gw_info_schema = (
        self.os_admin.congress_client.show_datasource_table_schema(
            self.datasource_id, 'external_gateway_infos')['columns'])

    @helper.retry_on_exception
    def _check_data():
        routers_from_neutron = self.routers_client.list_routers()
        router_map = {}
        for router in routers_from_neutron['routers']:
            router_map[router['id']] = router

        client = self.os_admin.congress_client
        # Ask Congress to re-poll neutron, then give it a moment.
        client.request_refresh(self.datasource_id)
        time.sleep(1)

        routers = (
            client.list_datasource_rows(self.datasource_id, 'routers'))
        ext_gw_info = (
            client.list_datasource_rows(
                self.datasource_id, 'external_gateway_infos'))

        # Validate routers table
        for row in routers['results']:
            router_row = router_map[row['data'][0]]
            for index in range(len(router_schema)):
                # Compare as strings since Congress stringifies values.
                if (str(row['data'][index]) !=
                        str(router_row[router_schema[index]['name']])):
                    return False

        # validate external_gateway_infos
        for row in ext_gw_info['results']:
            router_ext_gw_info = (
                router_map[row['data'][0]]['external_gateway_info'])
            # populate router_id
            router_ext_gw_info['router_id'] = row['data'][0]
            for index in range(len(ext_gw_info_schema)):
                val = router_ext_gw_info[ext_gw_info_schema[index]['name']]
                if (str(row['data'][index]) != str(val)):
                    return False
        return True

    if not test_utils.call_until_true(func=_check_data,
                                      duration=200, sleep_for=10):
        raise exceptions.TimeoutException("Data did not converge in time "
                                          "or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_security_groups_table(self):
sg_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_groups')['columns'])
@helper.retry_on_exception
def _check_data():
client = self.security_groups_client
security_groups_neutron = client.list_security_groups()
security_groups_map = {}
for security_group in security_groups_neutron['security_groups']:
security_groups_map[security_group['id']] = security_group
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
security_groups = (
client.list_datasource_rows(
self.datasource_id, 'security_groups'))
# Validate security_group table
for row in security_groups['results']:
sg_row = security_groups_map[row['data'][0]]
for index in range(len(sg_schema)):
if (str(row['data'][index]) !=
str(sg_row[sg_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_security_group_rules_table(self):
sgrs_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_group_rules')['columns'])
@helper.retry_on_exception
def _check_data():
client = self.security_groups_client
security_groups_neutron = client.list_security_groups()
sgrs_map = {} # security_group_rules
for sg in security_groups_neutron['security_groups']:
for sgr in sg['security_group_rules']:
sgrs_map[sgr['id']] = sgr
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
security_group_rules = (
client.list_datasource_rows(
self.datasource_id, 'security_group_rules'))
# Validate security_group_rules table
for row in security_group_rules['results']:
sg_rule_row = sgrs_map[row['data'][1]]
for index in range(len(sgrs_schema)):
if (str(row['data'][index]) !=
str(sg_rule_row[sgrs_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_neutronv2_attach_detach_port_security_group(self):
self.network, self.subnet, self.router = self.create_networks()
self.check_networks()
# first create a test port with exactly 1 (default) security group
post_body = {
"port_security_enabled": True,
"network_id": self.network['id']}
body = self.ports_client.create_port(**post_body)
test_port = body['port']
self.addCleanup(self.ports_client.delete_port, test_port['id'])
# test detach and re-attach
test_policy = self._create_random_policy()
# use rules to detach group
self._create_policy_rule(
test_policy,
'execute[neutronv2:detach_port_security_group("%s", "%s")] '
':- p(1)' % (
test_port['id'], test_port['security_groups'][0]))
self._create_policy_rule(test_policy, 'p(1)')
def _check_data(num_sec_grps):
updated_port = self.ports_client.show_port(test_port['id'])
return len(updated_port['port']['security_groups']) == num_sec_grps
if not test_utils.call_until_true(func=lambda: _check_data(0),
duration=30, sleep_for=1):
raise exceptions.TimeoutException("Security group did not detach "
"within allotted time.")
# use rules to attach group
self._create_policy_rule(
test_policy,
'execute[neutronv2:attach_port_security_group("%s", "%s")] '
':- p(2)' % (
test_port['id'], test_port['security_groups'][0]))
self._create_policy_rule(test_policy, 'p(2)')
if not test_utils.call_until_true(func=lambda: _check_data(1),
duration=30, sleep_for=1):
raise exceptions.TimeoutException("Security group did not attach "
"within allotted time.")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('neutronv2'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')

View File

@ -1,248 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest import clients
from tempest.common import utils as tempest_utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import helper
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
RULE_TYPE = "bandwidth_limit_rules"
class TestNeutronV2QosDriver(manager_congress.ScenarioPolicyBase):
    """Scenario tests for the neutronv2_qos Congress datasource driver.

    setUp creates the datasource, a QoS policy with one bandwidth-limit
    rule, and a test network/port bound to that policy; the test then
    checks that Congress mirrors neutron's QoS state in its tables.
    """

    # Name used for both the datasource driver and the datasource instance.
    DATASOURCE_NAME = 'neutronv2_qos'
    @classmethod
    def skip_checks(cls):
        """Skip unless neutron plus its QoS extension are usable."""
        super(TestNeutronV2QosDriver, cls).skip_checks()
        # TODO(qos): check whether QoS extension is enabled
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)
        if not CONF.service_available.neutron:
            skip_msg = ("%s skipped as neutron is not available"
                        % cls.__name__)
            raise cls.skipException(skip_msg)
        if not tempest_utils.is_extension_enabled('qos', 'network'):
            skip_msg = ("%s skipped as neutron QoS extension is not available"
                        % cls.__name__)
            raise cls.skipException(skip_msg)
    def setUp(self):
        """Create the datasource and the QoS policy/rule/network/port fixtures."""
        super(TestNeutronV2QosDriver, self).setUp()
        self.qos_rules = []
        self.qos_policies = []
        self.os_primary = clients.Manager(
            self.os_admin.auth_provider.credentials)
        body = {"config": {"username": CONF.auth.admin_username,
                           "tenant_name": CONF.auth.admin_project_name,
                           "password": CONF.auth.admin_password,
                           "auth_url": CONF.identity.uri},
                "driver": self.DATASOURCE_NAME,
                "name": self.DATASOURCE_NAME}
        # Conflict means the datasource already exists, which is fine.
        try:
            self.os_admin.congress_client.create_datasource(body)['id']
        except exceptions.Conflict:
            pass
        self.datasource_id = manager_congress.get_datasource_id(
            self.os_admin.congress_client, self.DATASOURCE_NAME)
        # Get client
        self.admin_qos_client = self.os_admin.qos_client
        self.admin_qos_rule_client = self.os_admin.qos_rule_client
        self.networks_client = self.os_primary.networks_client
        self.ports_client = self.os_primary.ports_client
        # Create qos and qos rule
        self.qos_policy = self._create_qos_policy('test_qos_policy',
                                                  description="test",
                                                  share=True)
        self.qos_rule = self._create_qos_bandwidth_limit_rule(
            self.qos_policy['id'], 1000, 1000)
        # Associate policy with port
        body = self.networks_client.create_network(
            name="test_qos_network")
        self.network = body["network"]
        body = self.ports_client.create_port(
            network_id=self.network['id'])
        self.port = body["port"]
        self.ports_client.update_port(
            self.port['id'], qos_policy_id=self.qos_policy['id'])
    def tearDown(self):
        """Delete the port/network, then the QoS rule/policy, from setUp."""
        super(TestNeutronV2QosDriver, self).tearDown()
        # Clear port and net
        self.ports_client.delete_port(self.port['id'])
        self.networks_client.delete_network(self.network["id"])
        # Clear qos policy and qos rule
        self.admin_qos_rule_client.delete_qos_rule(
            self.qos_policy['id'], RULE_TYPE, self.qos_rule['id'])
        self.admin_qos_client.delete_qos_policy(self.qos_policy['id'])
    def _create_qos_policy(self, name, description=None, share=False):
        """Wrapper utility that returns a test QoS policy."""
        body = self.admin_qos_client.create_qos_policy(
            name=name, description=description, shared=share)
        qos_policy = body['policy']
        self.qos_policies.append(qos_policy)
        return qos_policy
    def _create_qos_bandwidth_limit_rule(self, policy_id, max_kbps,
                                         max_burst_kbps,
                                         direction='egress'):
        """Wrapper utility that returns a test QoS bandwidth limit rule."""
        rule_type = RULE_TYPE
        body = self.admin_qos_rule_client.create_qos_rule(
            policy_id, rule_type,
            max_kbps=max_kbps, max_burst_kbps=max_burst_kbps,
            direction=direction)
        qos_rule = body['bandwidth_limit_rule']
        self.qos_rules.append(qos_rule)
        return qos_rule
    @decorators.attr(type='smoke')
    @tempest_utils.services('network')
    def test_neutronv2_ports_tables(self):
        """Check the ports/bindings and QoS policy/rule tables vs neutron."""
        port_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'ports')['columns'])
        port_qos_binding_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'qos_policy_port_bindings')['columns'])
        qos_policy_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'policies')['columns'])
        qos_rule_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'rules')['columns'])
        @helper.retry_on_exception
        def _check_data_for_port():
            # Snapshot neutron's ports, keyed by port id.
            ports_from_neutron = self.ports_client.list_ports()
            port_map = {}
            for port in ports_from_neutron['ports']:
                port_map[port['id']] = port
            client = self.os_admin.congress_client
            client.request_refresh(self.datasource_id)
            time.sleep(1)
            ports = (client.list_datasource_rows(self.datasource_id, 'ports'))
            qos_policy_port_bindings = (
                client.list_datasource_rows(
                    self.datasource_id, 'qos_policy_port_bindings'))
            # Validate ports table
            for row in ports['results']:
                port_row = port_map[row['data'][0]]
                for index in range(len(port_schema)):
                    if (str(row['data'][index]) !=
                            str(port_row[port_schema[index]['name']])):
                        return False
            # validate qos_policy_port_bindings table
            for row in qos_policy_port_bindings['results']:
                port_row = port_map[row['data'][0]]
                for index in range(len(port_qos_binding_schema)):
                    row_index = port_qos_binding_schema[index]['name']
                    # Translate port_id -> id
                    if row_index == 'port_id':
                        if (str(row['data'][index]) !=
                                str(port_row['id'])):
                            return False
                    elif row_index == 'qos_policy_id':
                        if (str(row['data'][index]) not in
                                port_row['policies']):
                            return False
            return True
        @helper.retry_on_exception
        def _check_data_for_qos():
            # Snapshot neutron's QoS policies and this policy's rules.
            qos_from_neutron = self.admin_qos_client.list_qos_policy()
            rule_from_neutron = self.admin_qos_rule_client.list_qos_rule(
                self.qos_policy['id'])
            policy_map = {}
            rule_map = {}
            for policy in qos_from_neutron['policies']:
                policy_map[policy['id']] = policy
            for rule in rule_from_neutron['policy']['rules']:
                rule_map[self.qos_policy['id']] = rule
            client = self.os_admin.congress_client
            client.request_refresh(self.datasource_id)
            time.sleep(1)
            qos_policies = (client.list_datasource_rows(
                self.datasource_id, 'policies'))
            qos_rules = (client.list_datasource_rows(
                self.datasource_id, 'rules'))
            # Validate policies table
            for row in qos_policies['results']:
                policy_row = policy_map[row['data'][0]]
                for index in range(len(qos_policy_schema)):
                    if (str(row['data'][index]) !=
                            str(policy_row[qos_policy_schema[index]['name']])):
                        return False
            # Validate rules table
            for row in qos_rules['results']:
                rule_row = rule_map[row['data'][0]]
                for index in range(len(qos_rule_schema)):
                    # "None" cells are unset columns; skip the comparison.
                    if str(row['data'][index]) != "None":
                        if (str(row['data'][index]) !=
                                str(rule_row[qos_rule_schema[index]['name']])):
                            return False
            return True
        if not test_utils.call_until_true(func=_check_data_for_port,
                                          duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
        if not test_utils.call_until_true(func=_check_data_for_qos,
                                          duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """The QoS datasource must poll without raising errors."""
        if not test_utils.call_until_true(
            func=lambda: self.check_datasource_no_error(
                self.DATASOURCE_NAME),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,152 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import helper
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestNovaDriver(manager_congress.ScenarioPolicyBase):
    """Scenario tests for the nova Congress datasource driver."""

    @classmethod
    def skip_checks(cls):
        """Skip unless nova is available and networks are reachable."""
        super(TestNovaDriver, cls).skip_checks()
        if not CONF.service_available.nova:
            skip_msg = ("%s skipped as nova is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)
    def setUp(self):
        """Look up the id of the pre-registered 'nova' datasource."""
        super(TestNovaDriver, self).setUp()
        self.keypairs = {}
        self.servers = []
        self.datasource_id = manager_congress.get_datasource_id(
            self.os_admin.congress_client, 'nova')
    @decorators.attr(type='smoke')
    @utils.services('compute', 'network')
    def test_nova_datasource_driver_servers(self):
        """Verify the 'servers' datasource table against a booted server."""
        self._setup_network_and_servers()
        server_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'servers')['columns'])
        # Convert some of the column names.
        def convert_col(col):
            # Map Congress column names onto nova API attribute names.
            if col == 'host_id':
                return 'hostId'
            elif col == 'image_id':
                return 'image'
            elif col == 'flavor_id':
                return 'flavor'
            elif col == 'zone':
                return 'OS-EXT-AZ:availability_zone'
            elif col == 'host_name':
                return 'OS-EXT-SRV-ATTR:hypervisor_hostname'
            else:
                return col
        keys = [convert_col(c['name']) for c in server_schema]
        @helper.retry_on_exception
        def _check_data_table_nova_servers():
            # True once some Congress row matches our first test server.
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'servers'))
            for row in results['results']:
                match = True
                for index in range(len(keys)):
                    if keys[index] in ['image', 'flavor']:
                        val = self.servers[0][keys[index]]['id']
                    # Test servers created doesn't have this attribute,
                    # so ignoring the same in tempest tests.
                    elif keys[index] in \
                            ['OS-EXT-SRV-ATTR:hypervisor_hostname']:
                        continue
                    else:
                        val = self.servers[0][keys[index]]
                    if row['data'][index] != val:
                        match = False
                        break
                if match:
                    return True
            return False
        if not test_utils.call_until_true(func=_check_data_table_nova_servers,
                                          duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    @decorators.attr(type='smoke')
    @utils.services('compute', 'network')
    def test_nova_datasource_driver_flavors(self):
        """Verify the 'flavors' datasource table against nova's flavors."""
        @helper.retry_on_exception
        def _check_data_table_nova_flavors():
            # Fetch data from nova each time, because this test may start
            # before nova has all the users.
            flavors = self.flavors_client.list_flavors(detail=True)
            flavor_id_map = {}
            for flavor in flavors['flavors']:
                flavor_id_map[flavor['id']] = flavor
            results = (
                self.os_admin.congress_client.list_datasource_rows(
                    self.datasource_id, 'flavors'))
            # TODO(alexsyip): Not sure what the following OS-FLV-EXT-DATA:
            # prefix is for.
            keys = ['id', 'name', 'vcpus', 'ram', 'disk',
                    'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor']
            for row in results['results']:
                match = True
                try:
                    flavor_row = flavor_id_map[row['data'][0]]
                except KeyError:
                    # Congress knows a flavor nova no longer reports.
                    return False
                for index in range(len(keys)):
                    if row['data'][index] != flavor_row[keys[index]]:
                        match = False
                        break
                if match:
                    return True
            return False
        if not test_utils.call_until_true(func=_check_data_table_nova_flavors,
                                          duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """The nova datasource must poll without raising errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('nova'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,47 +0,0 @@
# Copyright 2017 VMware Inc. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestSwiftDriver(manager_congress.ScenarioPolicyBase):
    """Scenario tests for the swift Congress datasource driver."""

    @classmethod
    def skip_checks(cls):
        """Skip the whole class when the swift service is unavailable."""
        super(TestSwiftDriver, cls).skip_checks()
        if not CONF.service_available.swift:
            # Bug fix: inside a classmethod ``cls`` already IS the class, so
            # the previous ``cls.__class__.__name__`` yielded the metaclass
            # name ('type'), not this test class. Use ``cls.__name__``,
            # matching the other datasource driver tests in this package.
            msg = ("%s skipped because swift service is not configured" %
                   cls.__name__)
            raise cls.skipException(msg)

    # TODO(testing): checks on correctness of data in updates
    # swift driver experiences auth error in test
    @decorators.skip_because(bug="980688")
    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        """The swift datasource must poll without raising errors."""
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('swift'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')

View File

@ -1,297 +0,0 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import subprocess
import tempfile
from oslo_log import log as logging
import six
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import manager as tempestmanager
from urllib3 import exceptions as urllib3_exceptions
from congress_tempest_tests.services.policy import policy_client
from congress_tempest_tests.tests.scenario import helper
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestHA(manager_congress.ScenarioPolicyBase):
    """HA scenario: run a replica congress server and verify DB-driven sync."""

    def setUp(self):
        """Cache admin clients and initialize the replica bookkeeping map."""
        super(TestHA, self).setUp()
        self.keypairs = {}
        self.servers = []
        # port_num -> ({'API': proc, 'PE': proc, 'DS': proc}, conf_file)
        self.replicas = {}
        self.services_client = self.os_admin.identity_services_v3_client
        self.endpoints_client = self.os_admin.endpoints_v3_client
        self.client = self.os_admin.congress_client
    def _prepare_replica(self, port_num):
        """Register a keystone service and endpoint for the replica server."""
        replica_url = "http://127.0.0.1:%d" % port_num
        resp = self.services_client.create_service(
            name='congressha',
            type=CONF.congressha.replica_type,
            description='policy ha service')
        self.replica_service_id = resp['service']['id']
        resp = self.endpoints_client.create_endpoint(
            service_id=self.replica_service_id,
            region=CONF.identity.region,
            interface='public',
            url=replica_url)
        self.replica_endpoint_id = resp['endpoint']['id']
    def _cleanup_replica(self):
        """Remove the keystone endpoint/service created for the replica."""
        self.endpoints_client.delete_endpoint(self.replica_endpoint_id)
        self.services_client.delete_service(self.replica_service_id)
    def start_replica(self, port_num):
        """Start a replica (api, policy-engine, datasources) on port_num.

        Derives its config from /etc/congress/congress.conf, overriding the
        bind port, sync period, and DSE bus id.
        """
        self._prepare_replica(port_num)
        f = tempfile.NamedTemporaryFile(mode='w', suffix='.conf',
                                        prefix='congress%d-' % port_num,
                                        dir='/tmp', delete=False)
        conf_file = f.name
        template = open('/etc/congress/congress.conf')
        conf = template.read()
        # Add 'bind_port' and 'datasource_sync_period' to conf file.
        index = conf.find('[DEFAULT]') + len('[DEFAULT]\n')
        conf = (conf[:index] +
                'bind_port = %d\n' % port_num +
                conf[index:])
        # set datasource sync period interval to 5
        conf = conf.replace('datasource_sync_period = 30',
                            'datasource_sync_period = 5')
        # Comment out the signing_dir option for the replica.
        sindex = conf.find('signing_dir')
        conf = conf[:sindex] + '#' + conf[sindex:]
        conf = conf + '\n[dse]\nbus_id = replica-node\n'
        LOG.debug("Configuration file for replica: %s\n", conf)
        f.write(conf)
        f.close()
        # start all services on replica node
        api = self.start_service('api', conf_file)
        pe = self.start_service('policy-engine', conf_file)
        data = self.start_service('datasources', conf_file)
        assert port_num not in self.replicas
        LOG.debug("successfully started replica services\n")
        self.replicas[port_num] = ({'API': api, 'PE': pe, 'DS': data},
                                   conf_file)
    def start_service(self, name, conf_file):
        """Spawn one congress-server component; returns its Popen handle."""
        service = '--' + name
        node = name + '-replica-node'
        args = ['bin/congress-server', service,
                '--node-id', node, '--config-file', conf_file]
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             cwd=helper.root_path())
        return p
    def stop_replica(self, port_num):
        """Kill the replica's processes and delete its temporary config."""
        procs, conf_file = self.replicas[port_num]
        # Using proc.terminate() will block at proc.wait(), no idea why yet
        # kill all processes
        for p in procs.values():
            p.kill()
            p.wait()
        os.unlink(conf_file)
        self.replicas[port_num] = (None, conf_file)
        self._cleanup_replica()
    def create_client(self, client_type):
        """Build a PolicyClient for the given keystone service type."""
        creds = credentials.get_configured_admin_credentials('identity_admin')
        auth_prov = tempestmanager.get_auth_provider(creds)
        return policy_client.PolicyClient(
            auth_prov, client_type,
            CONF.identity.region)
    def _check_replica_server_status(self, client):
        """Return True once the replica API answers a list_policy call."""
        try:
            LOG.debug("Check replica server status")
            client.list_policy()
            LOG.debug("replica server ready")
            return True
        except exceptions.Unauthorized:
            LOG.debug("connection refused")
            return False
        except (socket.error, urllib3_exceptions.MaxRetryError):
            LOG.debug("Replica server not ready")
            return False
        except Exception:
            raise
        # NOTE(review): unreachable -- every path above returns or raises.
        return False
    def find_fake(self, client):
        """Return the id of the 'fake' datasource, or None if absent."""
        datasources = client.list_datasources()
        for r in datasources['results']:
            if r['name'] == 'fake':
                LOG.debug('existing fake driver: %s', str(r['id']))
                return r['id']
        return None
    def _check_resource_exists(self, client, resource):
        """Return True if the 'fake' datasource/policy status is readable.

        NOTE(review): the ``client`` parameter is ignored; both branches go
        through ``self.client`` (the primary server) -- confirm intended.
        """
        try:
            body = None
            if resource == 'datasource':
                LOG.debug("Check datasource exists")
                body = self.client.list_datasource_status('fake')
            else:
                LOG.debug("Check policy exists")
                body = self.client.list_policy_status('fake')
            LOG.debug("resource status: %s", str(body))
        except exceptions.NotFound:
            LOG.debug("resource 'fake' not found")
            return False
        return True
    def _check_resource_missing(self, client, resource):
        """Negation of _check_resource_exists."""
        return not self._check_resource_exists(client, resource)
    def create_fake(self, client):
        """Return the id of the 'fake' datasource, creating it if needed."""
        # Create fake datasource if it does not exist. Returns the
        # fake datasource id.
        fake_id = self.find_fake(client)
        if fake_id:
            return fake_id
        item = {'id': None,
                'name': 'fake',
                'driver': 'fake_datasource',
                'config': {"username": "fakeu",
                           "tenant_name": "faket",
                           "password": "fakep",
                           "auth_url": "http://127.0.0.1:5000/v2"},
                'description': 'bar',
                'enabled': True}
        ret = client.create_datasource(item)
        LOG.debug('created fake driver: %s', str(ret['id']))
        return ret['id']
    @decorators.attr(type='smoke')
    def test_datasource_db_sync_add_remove(self):
        """Adding/removing a datasource in the DB propagates to both nodes."""
        # Verify that a replica adds a datasource when a datasource
        # appears in the database.
        replica_server = False
        try:
            # Check fake if exists. else create
            fake_id = self.create_fake(self.client)
            # Start replica
            self.start_replica(CONF.congressha.replica_port)
            replica_client = self.create_client(CONF.congressha.replica_type)
            # Check replica server status
            if not test_utils.call_until_true(
                    func=lambda: self._check_replica_server_status(
                        replica_client),
                    duration=60, sleep_for=1):
                # Dump each replica service's captured output for debugging.
                for port_num in self.replicas:
                    procs = self.replicas[port_num][0]
                    for service_key in procs:
                        output, error = procs[service_key].communicate()
                        LOG.debug("Replica port %s service %s logs: %s",
                                  port_num,
                                  service_key,
                                  six.StringIO(output.decode()).getvalue())
                raise exceptions.TimeoutException("Replica Server not ready")
            # Relica server is up
            replica_server = True
            # primary server might sync later than replica server due to
            # diff in datasource sync interval(P-30, replica-5). So checking
            # replica first
            # Verify that replica server synced fake dataservice and policy
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_exists(
                        replica_client, 'datasource'),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica doesn't have fake dataservice, data sync failed")
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_exists(
                        replica_client, 'policy'),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica doesn't have fake policy, policy sync failed")
            # Verify that primary server synced fake dataservice and policy
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_exists(
                        self.client, 'datasource'),
                    duration=90, sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary doesn't have fake dataservice, data sync failed")
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_exists(
                        self.client, 'policy'),
                    duration=90, sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary doesn't have fake policy, policy sync failed")
            # Remove fake from primary server instance.
            LOG.debug("removing fake datasource %s", str(fake_id))
            self.client.delete_datasource(fake_id)
            # Verify that replica server has no fake datasource and fake policy
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_missing(
                        replica_client, 'datasource'),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica still has fake dataservice, sync failed")
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_missing(
                        replica_client, 'policy'),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica still fake policy, policy synchronizer failed")
            LOG.debug("removed fake datasource from replica instance")
            # Verify that primary server has no fake datasource and fake policy
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_missing(
                        self.client, 'datasource'),
                    duration=90, sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary still has fake dataservice, sync failed")
            if not test_utils.call_until_true(
                    func=lambda: self._check_resource_missing(
                        self.client, 'policy'),
                    duration=90, sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary still fake policy, policy synchronizer failed")
            LOG.debug("removed fake datasource from primary instance")
        finally:
            if replica_server:
                self.stop_replica(CONF.congressha.replica_port)

View File

@ -1,49 +0,0 @@
# Copyright (c) 2015 Hewlett Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import tenacity
@tenacity.retry(stop=tenacity.stop_after_attempt(20),
                wait=tenacity.wait_fixed(1))
def retry_check_function_return_value(f, expected_value, error_msg=None):
    """Assert that ``f()`` returns *expected_value*, retrying on mismatch.

    tenacity re-runs the check up to 20 times, one second apart, before
    letting the exception propagate.
    """
    actual = f()
    if actual != expected_value:
        raise Exception(
            error_msg or 'Expected value "%s" not found' % expected_value)
def retry_on_exception(f):
    """Decorator that converts any exception raised by ``f`` into ``False``.

    Used by the convergence checks: tempest's ``call_until_true`` treats a
    ``False`` return as "not ready yet", so a transient API error simply
    triggers another polling round instead of failing the test.

    Improvements over the original: ``functools.wraps`` preserves the
    wrapped function's metadata, and positional/keyword arguments are now
    forwarded to ``f`` (backward-compatible -- existing zero-argument
    callers are unaffected).
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            return False
    return wrapper
def root_path():
    """Return path to root of source code."""
    path = os.path.realpath(__file__)
    # Walk up four levels:
    # helper.py -> scenario -> tests -> congress_tempest_tests -> root
    for _ in range(4):
        path = os.path.dirname(path)
    return path

View File

@ -1,968 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# local congress copy of upstream file. Maintained temporarily while
# file undergoes refactoring upstream.
import subprocess
import netaddr
from oslo_log import log
from oslo_utils import netutils
from tempest.common import compute
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
    @classmethod
    def setup_clients(cls):
        """Bind every service client used by the scenario tests to the class.

        Clients come from ``cls.os_primary``; where a service exposes more
        than one API version, CONF feature flags select which client to use.
        """
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.os_primary.flavors_client
        cls.compute_floating_ips_client = (
            cls.os_primary.compute_floating_ips_client)
        if CONF.service_available.glance:
            # Check if glance v1 is available to determine which client to use.
            if CONF.image_feature_enabled.api_v1:
                cls.image_client = cls.os_primary.image_client
            elif CONF.image_feature_enabled.api_v2:
                cls.image_client = cls.os_primary.image_client_v2
            else:
                raise lib_exc.InvalidConfiguration(
                    'Either api_v1 or api_v2 must be True in '
                    '[image-feature-enabled].')
        # Compute image client
        cls.compute_images_client = cls.os_primary.compute_images_client
        cls.keypairs_client = cls.os_primary.keypairs_client
        # Nova security groups client
        cls.compute_security_groups_client = (
            cls.os_primary.compute_security_groups_client)
        cls.compute_security_group_rules_client = (
            cls.os_primary.compute_security_group_rules_client)
        cls.servers_client = cls.os_primary.servers_client
        cls.interface_client = cls.os_primary.interfaces_client
        # Neutron network client
        cls.networks_client = cls.os_primary.networks_client
        cls.ports_client = cls.os_primary.ports_client
        cls.routers_client = cls.os_primary.routers_client
        cls.subnets_client = cls.os_primary.subnets_client
        cls.floating_ips_client = cls.os_primary.floating_ips_client
        cls.security_groups_client = cls.os_primary.security_groups_client
        cls.security_group_rules_client = (
            cls.os_primary.security_group_rules_client)
        # Pick volume/snapshot clients matching the deployed cinder API.
        if CONF.volume_feature_enabled.api_v2:
            cls.volumes_client = cls.os_primary.volumes_v2_client
            cls.snapshots_client = cls.os_primary.snapshots_v2_client
        else:
            cls.volumes_client = cls.os_primary.volumes_client
            cls.snapshots_client = cls.os_primary.snapshots_client
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.ports_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
return port
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name=name)
self.addCleanup(client.delete_keypair, name)
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
validatable=False, wait_until='ACTIVE',
clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
# tests need to be run regardless of the run_validation and
# validatable parameters and thus until the ssh validation job
# becomes voting in CI. The test resources management and IP
# association are taken care of in the scenario tests.
# Therefore, the validatable parameter is set to false in all
# those tests. In this way create_server just return a standard
# server and the scenario tests always perform ssh checks.
# Needed for the cross_tenant_traffic test:
if clients is None:
clients = self.os_primary
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-server")
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured create port for
# every network
if vnic_type:
ports = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
if 'security_groups' in kwargs:
security_groups = \
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = kwargs.pop('networks', [])
else:
networks = []
# If there are no networks passed to us we look up
# for the project's private networks and create a port.
# The same behaviour as we would expect when passing
# the call to the clients with no networks
if not networks:
networks = clients.networks_client.list_networks(
**{'router:external': False, 'fields': 'id'})['networks']
# It's net['uuid'] if networks come from kwargs
# and net['id'] if they come from
# clients.networks_client.list_networks
for net in networks:
net_id = net.get('uuid', net.get('id'))
if 'port' not in net:
port = self._create_port(network_id=net_id,
client=clients.ports_client,
**create_port_body)
ports.append({'port': port['id']})
else:
ports.append({'port': net['port']})
if ports:
kwargs['networks'] = ports
self.ports = ports
tenant_network = self.get_tenant_network()
body, servers = compute.create_test_server(
clients,
tenant_network=tenant_network,
wait_until=wait_until,
name=name, flavor=flavor,
image_id=image_id, **kwargs)
self.addCleanup(waiters.wait_for_server_termination,
clients.servers_client, body['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, body['id'])
server = clients.servers_client.show_server(body['id'])['server']
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None):
if size is None:
size = CONF.volume.volume_size
if imageRef:
image = self.compute_images_client.show_image(imageRef)['image']
min_disk = image.get('minDisk')
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
kwargs = {'display_name': name,
'snapshot_id': snapshot_id,
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.volumes_client.delete_volume, volume['id'])
# NOTE(e0ne): Cinder API v2 uses name instead of display_name
if 'display_name' in volume:
self.assertEqual(name, volume['display_name'])
else:
self.assertEqual(name, volume['name'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
sgs = _client.list_security_groups()['security_groups']
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = _client_rules.create_security_group_rule(
parent_group_id=secgroup_id, **ruleset)['security_group_rule']
rules.append(sg_rule)
return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.compute_security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, ip_address, username=None, private_key=None):
"""Get a SSH client to a remote server
@param ip_address the server floating or fixed IP address to use
for ssh validation
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@return a RemoteClient object
"""
if username is None:
username = CONF.validation.image_ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.validation.auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.validation.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip_address, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip_address,
'error': e})
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
self._log_console_output()
raise
return linux_client
def _image_create(self, name, fmt, path,
disk_format=None, properties=None):
if properties is None:
properties = {}
name = data_utils.rand_name('%s-' % name)
params = {
'name': name,
'container_format': fmt,
'disk_format': disk_format or fmt,
}
if CONF.image_feature_enabled.api_v1:
params['is_public'] = 'False'
params['properties'] = properties
params = {'headers': common_image.image_meta_to_headers(**params)}
else:
params['visibility'] = 'private'
# Additional properties are flattened out in the v2 API.
params.update(properties)
body = self.image_client.create_image(**params)
image = body['image'] if 'image' in body else body
self.addCleanup(self.image_client.delete_image, image['id'])
self.assertEqual("queued", image['status'])
with open(path, 'rb') as image_file:
if CONF.image_feature_enabled.api_v1:
self.image_client.update_image(image['id'], data=image_file)
else:
self.image_client.store_image_file(image['id'], image_file)
return image['id']
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
try:
console_output = self.servers_client.get_console_output(
server['id'])['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
except lib_exc.NotFound:
LOG.debug("Server %s disappeared(deleted) while looking "
"for the console log", server['id'])
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None, mtu=None):
timeout = ping_timeout or CONF.validation.ping_timeout
cmd = ['ping', '-c1', '-w1']
if mtu:
cmd += [
# don't fragment
'-M', 'do',
# ping receives just the size of ICMP payload
'-s', str(net_utils.get_ping_payload_size(mtu, 4))
]
cmd.append(ip_address)
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
caller = test_utils.find_test_caller()
LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
' expected result is %(should_succeed)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'should_succeed':
'reachable' if should_succeed else 'unreachable'
})
result = test_utils.call_until_true(ping, timeout, 1)
LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
'ping result is %(result)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
return result
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True,
mtu=None):
"""Check server connectivity
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
:param mtu: network MTU to use for connectivity validation
:raises: AssertError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect,
mtu=mtu),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
self.get_remote_client(ip_address, username, private_key)
class NetworkScenarioTest(ScenarioTest):
    """Base class for network scenario tests.
    This class provide helpers for network scenario tests, using the neutron
    API. Helpers from ancestor which use the nova network API are overridden
    with the neutron API.
    This Class also enforces using Neutron instead of novanetwork.
    Subclassed tests will be skipped if Neutron is not enabled
    """
    credentials = ['primary', 'admin']
    @classmethod
    def skip_checks(cls):
        """Skip the whole class when neutron is not deployed."""
        super(NetworkScenarioTest, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException('Neutron not available')
    def _create_network(self, networks_client=None,
                        tenant_id=None,
                        namestart='network-smoke-',
                        port_security_enabled=True):
        """Create a randomly named neutron network; cleanup is queued."""
        if not networks_client:
            networks_client = self.networks_client
        if not tenant_id:
            tenant_id = networks_client.tenant_id
        name = data_utils.rand_name(namestart)
        network_kwargs = dict(name=name, tenant_id=tenant_id)
        # Neutron disables port security by default so we have to check the
        # config before trying to create the network with port_security_enabled
        if CONF.network_feature_enabled.port_security:
            network_kwargs['port_security_enabled'] = port_security_enabled
        result = networks_client.create_network(**network_kwargs)
        network = result['network']
        self.assertEqual(network['name'], name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        networks_client.delete_network,
                        network['id'])
        return network
    def _create_subnet(self, network, subnets_client=None,
                       routers_client=None, namestart='subnet-smoke',
                       **kwargs):
        """Create a subnet for the given network
        within the cidr block configured for tenant networks.
        """
        if not subnets_client:
            subnets_client = self.subnets_client
        if not routers_client:
            routers_client = self.routers_client
        def cidr_in_use(cidr, tenant_id):
            """Check cidr existence
            :returns: True if subnet with cidr already exist in tenant
                  False else
            """
            cidr_in_use = self.os_admin.subnets_client.list_subnets(
                tenant_id=tenant_id, cidr=cidr)['subnets']
            return len(cidr_in_use) != 0
        ip_version = kwargs.pop('ip_version', 4)
        if ip_version == 6:
            tenant_cidr = netaddr.IPNetwork(
                CONF.network.project_network_v6_cidr)
            num_bits = CONF.network.project_network_v6_mask_bits
        else:
            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
            num_bits = CONF.network.project_network_mask_bits
        result = None
        str_cidr = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(num_bits):
            str_cidr = str(subnet_cidr)
            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
                continue
            subnet = dict(
                name=data_utils.rand_name(namestart),
                network_id=network['id'],
                tenant_id=network['tenant_id'],
                cidr=str_cidr,
                ip_version=ip_version,
                **kwargs
            )
            try:
                result = subnets_client.create_subnet(**subnet)
                break
            except lib_exc.Conflict as e:
                # A racing test may have grabbed the cidr between our
                # check and the create call; move on to the next block.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')
        subnet = result['subnet']
        self.assertEqual(subnet['cidr'], str_cidr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnets_client.delete_subnet, subnet['id'])
        return subnet
    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
        """Return the (port_id, ipv4_address) pair of the server's single
        active IPv4 port; asserts exactly one such port exists."""
        ports = self.os_admin.ports_client.list_ports(
            device_id=server['id'], fixed_ip=ip_addr)['ports']
        # A port can have more than one IP address in some cases.
        # If the network is dual-stack (IPv4 + IPv6), this port is associated
        # with 2 subnets
        p_status = ['ACTIVE']
        # NOTE(vsaienko) With Ironic, instances live on separate hardware
        # servers. Neutron does not bind ports for Ironic instances, as a
        # result the port remains in the DOWN state.
        # TODO(vsaienko) remove once bug: #1599836 is resolved.
        if getattr(CONF.service_available, 'ironic', False):
            p_status.append('DOWN')
        port_map = [(p["id"], fxip["ip_address"])
                    for p in ports
                    for fxip in p["fixed_ips"]
                    if netutils.is_valid_ipv4(fxip["ip_address"])
                    and p['status'] in p_status]
        inactive = [p for p in ports if p['status'] != 'ACTIVE']
        if inactive:
            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
        self.assertNotEqual(0, len(port_map),
                            "No IPv4 addresses found in: %s" % ports)
        self.assertEqual(len(port_map), 1,
                         "Found multiple IPv4 addresses: %s. "
                         "Unable to determine which port to target."
                         % port_map)
        return port_map[0]
    def _get_network_by_name(self, network_name):
        """Return the first network matching *network_name* (admin view)."""
        net = self.os_admin.networks_client.list_networks(
            name=network_name)['networks']
        self.assertNotEqual(len(net), 0,
                            "Unable to get network by name: %s" % network_name)
        return net[0]
    def create_floating_ip(self, thing, external_network_id=None,
                           port_id=None, client=None):
        """Create a floating IP and associates to a resource/port on Neutron"""
        if not external_network_id:
            external_network_id = CONF.network.public_network_id
        if not client:
            client = self.floating_ips_client
        if not port_id:
            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
        else:
            ip4 = None
        result = client.create_floatingip(
            floating_network_id=external_network_id,
            port_id=port_id,
            tenant_id=thing['tenant_id'],
            fixed_ip_address=ip4
        )
        floating_ip = result['floatingip']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_floatingip,
                        floating_ip['id'])
        return floating_ip
    def _associate_floating_ip(self, floating_ip, server):
        """Point *floating_ip* at the server's active IPv4 port."""
        port_id, _ = self._get_server_port_id_and_ip4(server)
        kwargs = dict(port_id=port_id)
        floating_ip = self.floating_ips_client.update_floatingip(
            floating_ip['id'], **kwargs)['floatingip']
        self.assertEqual(port_id, floating_ip['port_id'])
        return floating_ip
    def _disassociate_floating_ip(self, floating_ip):
        """:param floating_ip: floating_ips_client.create_floatingip"""
        kwargs = dict(port_id=None)
        floating_ip = self.floating_ips_client.update_floatingip(
            floating_ip['id'], **kwargs)['floatingip']
        self.assertIsNone(floating_ip['port_id'])
        return floating_ip
    def _check_tenant_network_connectivity(self, server,
                                           username,
                                           private_key,
                                           should_connect=True,
                                           servers_for_debug=None):
        """Check ssh/ping connectivity to every address of *server* on its
        tenant networks; no-op unless project networks are reachable."""
        if not CONF.network.project_networks_reachable:
            msg = 'Tenant networks not configured to be reachable.'
            LOG.info(msg)
            return
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        try:
            for net_name, ip_addresses in server['addresses'].items():
                for ip_address in ip_addresses:
                    self.check_vm_connectivity(ip_address['addr'],
                                               username,
                                               private_key,
                                               should_connect=should_connect)
        except Exception as e:
            LOG.exception('Tenant network connectivity check failed')
            self._log_console_output(servers_for_debug)
            self._log_net_info(e)
            raise
    def _check_remote_connectivity(self, source, dest, should_succeed=True,
                                   nic=None):
        """check ping server via source ssh connection
        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: and IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :param nic: specific network interface to ping from
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def ping_remote():
            try:
                source.ping_host(dest, nic=nic)
            except lib_exc.SSHExecCommandFailed:
                LOG.warning('Failed to ping IP: %s via a ssh connection '
                            'from: %s.', dest, source.ssh_client.host)
                return not should_succeed
            return should_succeed
        return test_utils.call_until_true(ping_remote,
                                          CONF.validation.ping_timeout,
                                          1)
    def _create_security_group(self, security_group_rules_client=None,
                               tenant_id=None,
                               namestart='secgroup-smoke',
                               security_groups_client=None):
        """Create a neutron security group with loginable rules.

        Overrides the nova-network variant on the ancestor class.
        """
        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if tenant_id is None:
            tenant_id = security_groups_client.tenant_id
        secgroup = self._create_empty_security_group(
            namestart=namestart, client=security_groups_client,
            tenant_id=tenant_id)
        # Add rules to the security group
        rules = self._create_loginable_secgroup_rule(
            security_group_rules_client=security_group_rules_client,
            secgroup=secgroup,
            security_groups_client=security_groups_client)
        for rule in rules:
            self.assertEqual(tenant_id, rule['tenant_id'])
            self.assertEqual(secgroup['id'], rule['security_group_id'])
        return secgroup
    def _create_empty_security_group(self, client=None, tenant_id=None,
                                     namestart='secgroup-smoke'):
        """Create a security group without rules.
        Default rules will be created:
         - IPv4 egress to any
         - IPv6 egress to any
        :param tenant_id: secgroup will be created in this tenant
        :returns: the created security group
        """
        if client is None:
            client = self.security_groups_client
        if not tenant_id:
            tenant_id = client.tenant_id
        sg_name = data_utils.rand_name(namestart)
        sg_desc = sg_name + " description"
        sg_dict = dict(name=sg_name,
                       description=sg_desc)
        sg_dict['tenant_id'] = tenant_id
        result = client.create_security_group(**sg_dict)
        secgroup = result['security_group']
        self.assertEqual(secgroup['name'], sg_name)
        self.assertEqual(tenant_id, secgroup['tenant_id'])
        self.assertEqual(secgroup['description'], sg_desc)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_security_group, secgroup['id'])
        return secgroup
    def _default_security_group(self, client=None, tenant_id=None):
        """Get default secgroup for given tenant_id.
        :returns: default secgroup for given tenant
        """
        if client is None:
            client = self.security_groups_client
        if not tenant_id:
            tenant_id = client.tenant_id
        # NOTE(review): takes the first value of the response dict --
        # presumably the 'security_groups' list; verify against the client.
        sgs = [
            sg for sg in list(client.list_security_groups().values())[0]
            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
        ]
        msg = "No default security group for tenant %s." % (tenant_id)
        self.assertGreater(len(sgs), 0, msg)
        return sgs[0]
    def _create_security_group_rule(self, secgroup=None,
                                    sec_group_rules_client=None,
                                    tenant_id=None,
                                    security_groups_client=None, **kwargs):
        """Create a rule from a dictionary of rule parameters.
        Create a rule in a secgroup. if secgroup not defined will search for
        default secgroup in tenant_id.
        :param secgroup: the security group.
        :param tenant_id: if secgroup not passed -- the tenant in which to
            search for default secgroup
        :param kwargs: a dictionary containing rule parameters:
            for example, to allow incoming ssh:
            rule = {
                direction: 'ingress'
                protocol:'tcp',
                port_range_min: 22,
                port_range_max: 22
                }
        """
        if sec_group_rules_client is None:
            sec_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if not tenant_id:
            tenant_id = security_groups_client.tenant_id
        if secgroup is None:
            secgroup = self._default_security_group(
                client=security_groups_client, tenant_id=tenant_id)
        ruleset = dict(security_group_id=secgroup['id'],
                       tenant_id=secgroup['tenant_id'])
        ruleset.update(kwargs)
        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
        sg_rule = sg_rule['security_group_rule']
        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
        return sg_rule
    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                        secgroup=None,
                                        security_groups_client=None):
        """Create loginable security group rule
        This function will create:
        1. egress and ingress tcp port 22 allow rule in order to allow ssh
        access for ipv4.
        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
        """
        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        rules = []
        rulesets = [
            dict(
                # ssh
                protocol='tcp',
                port_range_min=22,
                port_range_max=22,
            ),
            dict(
                # ping
                protocol='icmp',
            ),
            dict(
                # ipv6-icmp for ping6
                protocol='icmp',
                ethertype='IPv6',
            )
        ]
        sec_group_rules_client = security_group_rules_client
        for ruleset in rulesets:
            for r_direction in ['ingress', 'egress']:
                ruleset['direction'] = r_direction
                try:
                    sg_rule = self._create_security_group_rule(
                        sec_group_rules_client=sec_group_rules_client,
                        secgroup=secgroup,
                        security_groups_client=security_groups_client,
                        **ruleset)
                except lib_exc.Conflict as ex:
                    # if rule already exist - skip rule and continue
                    msg = 'Security group rule already exists'
                    if msg not in ex._error_string:
                        raise ex
                else:
                    self.assertEqual(r_direction, sg_rule['direction'])
                    rules.append(sg_rule)
        return rules
    def _get_router(self, client=None, tenant_id=None):
        """Retrieve a router for the given tenant id.
        If a public router has been configured, it will be returned.
        If a public router has not been configured, but a public
        network has, a tenant router will be created and returned that
        routes traffic to the public network.
        """
        if not client:
            client = self.routers_client
        if not tenant_id:
            tenant_id = client.tenant_id
        router_id = CONF.network.public_router_id
        network_id = CONF.network.public_network_id
        if router_id:
            body = client.show_router(router_id)
            return body['router']
        elif network_id:
            router = self._create_router(client, tenant_id)
            kwargs = {'external_gateway_info': dict(network_id=network_id)}
            router = client.update_router(router['id'], **kwargs)['router']
            return router
        else:
            raise Exception("Neither of 'public_router_id' or "
                            "'public_network_id' has been defined.")
    def _create_router(self, client=None, tenant_id=None,
                       namestart='router-smoke'):
        """Create a randomly named router for *tenant_id*; cleanup queued."""
        if not client:
            client = self.routers_client
        if not tenant_id:
            tenant_id = client.tenant_id
        name = data_utils.rand_name(namestart)
        result = client.create_router(name=name,
                                      admin_state_up=True,
                                      tenant_id=tenant_id)
        router = result['router']
        self.assertEqual(router['name'], name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_router,
                        router['id'])
        return router
    # def _update_router_admin_state(self, router, admin_state_up):
    #     kwargs = dict(admin_state_up=admin_state_up)
    #     router = self.routers_client.update_router(
    #         router['id'], **kwargs)['router']
    #     self.assertEqual(admin_state_up, router['admin_state_up'])
    def create_networks(self, networks_client=None,
                        routers_client=None, subnets_client=None,
                        tenant_id=None, dns_nameservers=None,
                        port_security_enabled=True):
        """Create a network with a subnet connected to a router.
        The baremetal driver is a special case since all nodes are
        on the same shared network.
        :param tenant_id: id of tenant to create resources in.
        :param dns_nameservers: list of dns servers to send to subnet.
        :returns: network, subnet, router
        """
        if CONF.network.shared_physical_network:
            # NOTE(Shrews): This exception is for environments where tenant
            # credential isolation is available, but network separation is
            # not (the current baremetal case). Likely can be removed when
            # test account mgmt is reworked:
            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
            if not CONF.compute.fixed_network_name:
                m = 'fixed_network_name must be specified in config'
                raise lib_exc.InvalidConfiguration(m)
            network = self._get_network_by_name(
                CONF.compute.fixed_network_name)
            router = None
            subnet = None
        else:
            network = self._create_network(
                networks_client=networks_client,
                tenant_id=tenant_id,
                port_security_enabled=port_security_enabled)
            router = self._get_router(client=routers_client,
                                      tenant_id=tenant_id)
            subnet_kwargs = dict(network=network,
                                 subnets_client=subnets_client,
                                 routers_client=routers_client)
            # use explicit check because empty list is a valid option
            if dns_nameservers is not None:
                subnet_kwargs['dns_nameservers'] = dns_nameservers
            subnet = self._create_subnet(**subnet_kwargs)
            if not routers_client:
                routers_client = self.routers_client
            router_id = router['id']
            routers_client.add_router_interface(router_id,
                                                subnet_id=subnet['id'])
            # save a cleanup job to remove this association between
            # router and subnet
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            routers_client.remove_router_interface, router_id,
                            subnet_id=subnet['id'])
        return network, subnet, router

View File

@ -1,288 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import random
import re
import string
from oslo_log import log as logging
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest import manager as tempestmanager
from congress_tempest_tests.services.congress_network import qos_client
from congress_tempest_tests.services.congress_network import qos_rule_client
from congress_tempest_tests.services.policy import policy_client
# use local copy of tempest scenario manager during upstream refactoring
from congress_tempest_tests.tests.scenario import manager
# Tempest configuration object and module-level logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Pairs a floating IP with the server it was assigned to, so tests can
# track both through setup and connectivity checks.
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
                                           ['floating_ip', 'server'])
def get_datasource_id(client, name):
    """Return the id of the congress datasource called *name*.

    :param client: a congress policy client exposing ``list_datasources``
    :param name: datasource name to look up
    :raises Exception: when no datasource with that name exists
    """
    listing = client.list_datasources()
    for entry in listing['results']:
        if entry['name'] == name:
            return entry['id']
    raise Exception("Datasource %s not found." % name)
# Note: these tests all use neutron today so we mix with that.
class ScenarioPolicyBase(manager.NetworkScenarioTest):
    """Base class for Congress scenario tests.

    Wires admin-credentialed Congress and Neutron QoS clients onto
    ``cls.os_admin`` and provides helpers for building a network/server
    topology, managing floating IPs, checking connectivity, and creating
    throwaway policies and rules with automatic cleanup.
    """

    @classmethod
    def setUpClass(cls):
        super(ScenarioPolicyBase, cls).setUpClass()
        # auth provider for admin credentials
        creds = credentials.get_configured_admin_credentials('identity_admin')
        auth_prov = tempestmanager.get_auth_provider(creds)
        cls.setup_required_clients(auth_prov)

    @classmethod
    def setup_required_clients(cls, auth_prov):
        """Attach the service clients these tests need to ``cls.os_admin``."""
        # Get congress client
        cls.os_admin.congress_client = policy_client.PolicyClient(
            auth_prov, "policy", CONF.identity.region)

        # Neutron QoS policy and rule clients.
        cls.os_admin.qos_client = qos_client.QosPoliciesClient(
            auth_prov, "network", CONF.identity.region)
        cls.os_admin.qos_rule_client = qos_rule_client.QosRuleClient(
            auth_prov, "network", CONF.identity.region)

        # FIXME(ekcs): disabled right now because the required client has been
        # removed from ceilometer repo along with the v2 API
        # # Get telemtery_client
        # if getattr(CONF.service_available, 'ceilometer', False):
        #     import ceilometer.tests.tempest.service.client as telemetry_client
        #     cls.os_admin.telemetry_client = (
        #         telemetry_client.TelemetryClient(
        #             auth_prov,
        #             CONF.telemetry.catalog_type, CONF.identity.region,
        #             endpoint_type=CONF.telemetry.endpoint_type))

        # Get alarms client
        if getattr(CONF.service_available, 'aodh_plugin', False):
            # imported lazily so aodh is only required when its plugin is on
            import aodh.tests.tempest.service.client as alarms_client
            cls.os_admin.alarms_client = (
                alarms_client.AlarmingClient(
                    auth_prov,
                    CONF.alarming_plugin.catalog_type, CONF.identity.region,
                    CONF.alarming_plugin.endpoint_type))

    def _setup_network_and_servers(self):
        # Build a full tenant topology (security group, network, subnet,
        # router), boot one server on it and give it a floating IP.
        self.security_group = self._create_security_group()
        self.network, self.subnet, self.router = self.create_networks()
        self.check_networks()

        name = data_utils.rand_name('server-smoke')
        server = self._create_server(name, self.network)
        self._check_tenant_network_connectivity()

        floating_ip = self.create_floating_ip(server)
        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)

    def check_networks(self):
        """Check for newly created network/subnet/router.

        Checks that we see the newly created network/subnet/router via
        checking the result of list_[networks,routers,subnets].
        """
        seen_nets = self.os_admin.networks_client.list_networks()
        seen_names = [n['name'] for n in seen_nets['networks']]
        seen_ids = [n['id'] for n in seen_nets['networks']]
        self.assertIn(self.network['name'], seen_names)
        self.assertIn(self.network['id'], seen_ids)

        if self.subnet:
            seen_subnets = self.os_admin.subnets_client.list_subnets()
            seen_net_ids = [n['network_id'] for n in seen_subnets['subnets']]
            seen_subnet_ids = [n['id'] for n in seen_subnets['subnets']]
            self.assertIn(self.network['id'], seen_net_ids)
            self.assertIn(self.subnet['id'], seen_subnet_ids)

        if self.router:
            seen_routers = self.os_admin.routers_client.list_routers()
            seen_router_ids = [n['id'] for n in seen_routers['routers']]
            seen_router_names = [n['name'] for n in seen_routers['routers']]
            self.assertIn(self.router['name'],
                          seen_router_names)
            self.assertIn(self.router['id'],
                          seen_router_ids)

    def check_datasource_no_error(self, datasource_name):
        """Check that datasource has no error on latest update"""
        ds_status = self.os_admin.congress_client.list_datasource_status(
            datasource_name)
        # NOTE(review): the status API appears to report values as strings,
        # hence the comparisons against 'True', '0' and 'None' — confirm
        # against the congress status API before changing.
        if (ds_status['initialized'] == 'True' and
                ds_status['number_of_updates'] != '0' and
                ds_status['last_error'] == 'None'):
            return True
        else:
            LOG.debug('datasource %s not initialized, not polled, or shows '
                      'error. Full status: %s', datasource_name, ds_status)
            return False

    def _create_server(self, name, network):
        # Boot a server on *network* with a fresh keypair; keypair and
        # server are remembered so later connectivity checks can ssh in.
        keypair = self.create_keypair()
        self.keypairs[keypair['name']] = keypair
        security_groups = [{'name': self.security_group['name']}]
        create_kwargs = {
            'networks': [
                {'uuid': network['id']},
            ],
            'key_name': keypair['name'],
            'security_groups': security_groups,
        }
        server = self.create_server(name=name, wait_until='ACTIVE',
                                    **create_kwargs)
        self.servers.append(server)
        return server

    def _get_server_key(self, server):
        # Private ssh key of the keypair the server was booted with.
        return self.keypairs[server['key_name']]['private_key']

    def _check_tenant_network_connectivity(self):
        ssh_login = CONF.validation.image_ssh_user
        for server in self.servers:
            # call the common method in the parent class
            super(ScenarioPolicyBase, self)._check_tenant_network_connectivity(
                server, ssh_login, self._get_server_key(server),
                servers_for_debug=self.servers)

    def _create_and_associate_floating_ips(self, server):
        public_network_id = CONF.network.public_network_id
        floating_ip = self._create_floating_ip(server, public_network_id)
        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)

    def _check_public_network_connectivity(self, should_connect=True,
                                           msg=None):
        ssh_login = CONF.compute.image_ssh_user
        floating_ip, server = self.floating_ip_tuple
        ip_address = floating_ip.floating_ip_address
        private_key = None
        if should_connect:
            # only need the ssh key when we expect the connection to succeed
            private_key = self._get_server_key(server)
        # call the common method in the parent class
        super(ScenarioPolicyBase, self)._check_public_network_connectivity(
            ip_address, ssh_login, private_key, should_connect, msg,
            self.servers)

    def _disassociate_floating_ips(self):
        floating_ip, server = self.floating_ip_tuple
        self._disassociate_floating_ip(floating_ip)
        # keep the floating ip but drop the server association
        self.floating_ip_tuple = Floating_IP_tuple(
            floating_ip, None)

    def _reassociate_floating_ips(self):
        floating_ip, server = self.floating_ip_tuple
        name = data_utils.rand_name('new_server-smoke-')
        # create a new server for the floating ip
        server = self._create_server(name, self.network)
        self._associate_floating_ip(floating_ip, server)
        self.floating_ip_tuple = Floating_IP_tuple(
            floating_ip, server)

    def _create_new_network(self):
        # Second, gateway-less network used by connectivity scenarios.
        self.new_net = self._create_network(tenant_id=self.tenant_id)
        self.new_subnet = self._create_subnet(
            network=self.new_net,
            gateway_ip=None)

    def _get_server_nics(self, ssh_client):
        # Parse `ip address` output into (index, nic-name) pairs.
        reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
        ipatxt = ssh_client.exec_command("ip address")
        return reg.findall(ipatxt)

    def _check_network_internal_connectivity(self, network):
        """via ssh check VM internal connectivity:

        - ping internal gateway and DHCP port, implying in-tenant connectivity
        pinging both, because L3 and DHCP agents might be on different nodes.
        """
        floating_ip, server = self.floating_ip_tuple
        # get internal ports' ips:
        # get all network ports in the new network
        ports = self.os_admin.ports_client.list_ports(
            tenant_id=server['tenant_id'], network_id=network.id)['ports']
        internal_ips = (p['fixed_ips'][0]['ip_address'] for p in ports
                        if p['device_owner'].startswith('network'))

        self._check_server_connectivity(floating_ip, internal_ips)

    def _check_network_external_connectivity(self):
        """ping public network default gateway to imply external conn."""
        if not CONF.network.public_network_id:
            msg = 'public network not defined.'
            LOG.info(msg)
            return

        subnet = self.os_admin.subnets_client.list_subnets(
            network_id=CONF.network.public_network_id)['subnets']
        self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))

        external_ips = [subnet[0]['gateway_ip']]
        self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                        external_ips)

    def _check_server_connectivity(self, floating_ip, address_list):
        # ssh into the floating-ip'd server, then ping each address in turn
        ip_address = floating_ip.floating_ip_address
        private_key = self._get_server_key(self.floating_ip_tuple.server)
        ssh_source = self._ssh_to_server(ip_address, private_key)

        for remote_ip in address_list:
            try:
                self.assertTrue(self._check_remote_connectivity(ssh_source,
                                                                remote_ip),
                                "Timed out waiting for %s to become "
                                "reachable" % remote_ip)
            except Exception:
                LOG.exception("Unable to access {dest} via ssh to "
                              "floating-ip {src}".format(dest=remote_ip,
                                                         src=floating_ip))
                raise

    def _create_random_policy(self):
        # Create a policy with a random nova_* name; deleted on cleanup.
        policy_name = "nova_%s" % ''.join(random.choice(string.ascii_lowercase)
                                          for x in range(10))
        body = {"name": policy_name}
        resp = self.os_admin.congress_client.create_policy(body)
        self.addCleanup(self.os_admin.congress_client.delete_policy,
                        resp['id'])
        return resp['name']

    def _create_policy_rule(self, policy_name, rule, rule_name=None,
                            comment=None):
        """Create *rule* in *policy_name*; registers cleanup on success.

        :raises Exception: when the create call returns a falsy response.
        """
        body = {'rule': rule}
        if rule_name:
            body['name'] = rule_name
        if comment:
            body['comment'] = comment
        client = self.os_admin.congress_client
        response = client.create_policy_rule(policy_name, body)
        if response:
            self.addCleanup(client.delete_policy_rule, policy_name,
                            response['id'])
            return response
        else:
            raise Exception('Failed to create policy rule (%s, %s)'
                            % (policy_name, rule))

View File

@ -1,287 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_tests.tests.scenario import helper
from congress_tempest_tests.tests.scenario import manager_congress
# Tempest configuration singleton shared by the test classes below.
CONF = config.CONF
class TestPolicyBasicOps(manager_congress.ScenarioPolicyBase):
    """End-to-end checks of Congress action execution and enforcement."""

    @classmethod
    def skip_checks(cls):
        super(TestPolicyBasicOps, cls).skip_checks()
        # These scenarios need some network path to the booted servers.
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def setUp(self):
        super(TestPolicyBasicOps, self).setUp()
        # per-test bookkeeping consumed by the base-class helpers
        self.keypairs = {}
        self.servers = []

    def _create_test_server(self, name=None):
        # Boot a minimal ACTIVE server with its own keypair/security group.
        image_ref = CONF.compute.image_ref
        flavor_ref = CONF.compute.flavor_ref
        keypair = self.create_keypair()
        security_group = self._create_security_group()
        security_groups = [{'name': security_group['name']}]
        create_kwargs = {'key_name': keypair['name'],
                         'security_groups': security_groups}
        instance = self.create_server(name=name,
                                      image_id=image_ref,
                                      flavor=flavor_ref,
                                      wait_until='ACTIVE',
                                      **create_kwargs)
        return instance

    @decorators.attr(type='smoke')
    @utils.services('compute', 'network')
    def test_execution_action(self):
        """Set server metadata via both the datasource and policy APIs."""
        metadata = {'testkey1': 'value3'}
        res = {'meta': {'testkey1': 'value3'}}
        server = self._create_test_server()
        congress_client = self.os_admin.congress_client
        servers_client = self.os_admin.servers_client
        policy = self._create_random_policy()
        service = 'nova'
        action = 'servers.set_meta'
        action_args = {'args': {'positional': [],
                                'named': {'server': server['id'],
                                          'metadata': metadata}}}
        # NOTE: body aliases action_args; the update() calls below mutate it.
        body = action_args
        # poller that reads the metadata item back from nova
        f = lambda: servers_client.show_server_metadata_item(server['id'],
                                                             'testkey1')

        # execute via datasource api
        body.update({'name': action})
        congress_client.execute_datasource_action(service, "execute", body)
        helper.retry_check_function_return_value(f, res)

        # execute via policy api
        body.update({'name': service + ':' + action})
        congress_client.execute_policy_action(policy, "execute", False,
                                              False, body)
        helper.retry_check_function_return_value(f, res)

    @decorators.attr(type='smoke')
    @utils.services('compute', 'network')
    def test_policy_basic_op(self):
        """Join neutron tables to map a server's port to its sec-group."""
        self._setup_network_and_servers()
        body = {"rule": "port_security_group(id, security_group_name) "
                        ":-neutronv2:ports(id, tenant_id, name, network_id,"
                        "mac_address, admin_state_up, status, device_id, "
                        "device_owner),"
                        "neutronv2:security_group_port_bindings(id, "
                        "security_group_id), neutronv2:security_groups("
                        "security_group_id, tenant_id1, security_group_name,"
                        "description)"}
        results = self.os_admin.congress_client.create_policy_rule(
            'classification', body)
        rule_id = results['id']
        self.addCleanup(
            self.os_admin.congress_client.delete_policy_rule,
            'classification', rule_id)

        # Find the ports of on this server
        ports = self.os_admin.ports_client.list_ports(
            device_id=self.servers[0]['id'])['ports']

        def check_data():
            # look for a row pairing this server's port id with the name
            # of its security group
            results = self.os_admin.congress_client.list_policy_rows(
                'classification', 'port_security_group')
            for row in results['results']:
                if (row['data'][0] == ports[0]['id'] and
                        row['data'][1] ==
                        self.servers[0]['security_groups'][0]['name']):
                    return True
            else:
                return False

        time.sleep(65)  # sleep for replicated PE sync
        # Note(ekcs): do not use retry because we want to make sure the call
        # succeeds on the first try after adequate time.
        # If retry used, it may pass based on succeding on one replica but
        # failing on all others.
        self.assertTrue(check_data(),
                        "Data did not converge in time or failure in server")

    @decorators.attr(type='smoke')
    @utils.services('compute', 'network')
    def test_reactive_enforcement(self):
        """A reactive rule should set metadata on the matching server."""
        servers_client = self.os_admin.servers_client
        server_name = 'server_under_test'
        server = self._create_test_server(name=server_name)
        policy_name = self._create_random_policy()
        meta_key = 'meta_test_key1'
        meta_val = 'value1'
        meta_data = {'meta': {meta_key: meta_val}}
        rules = [
            'execute[nova:servers_set_meta(id, "%s", "%s")] :- '
            'test_servers(id)' % (meta_key, meta_val),
            'test_servers(id) :- '
            'nova:servers(id, name, host_id, status, tenant_id,'
            'user_id, image_id, flavor_id, zone, host_name),'
            'equal(name, "%s")' % server_name]

        for rule in rules:
            self._create_policy_rule(policy_name, rule)
        # poller that reads the metadata item back from nova
        f = lambda: servers_client.show_server_metadata_item(server['id'],
                                                             meta_key)
        time.sleep(80)  # sleep for replicated PE sync
        # Note: seems reactive enforcement takes a bit longer
        # succeeds on the first try after adequate time.
        # If retry used, it may pass based on succeding on one replica but
        # failing on all others.
        self.assertEqual(f(), meta_data)
class TestPolicyLibraryBasicOps(manager_congress.ScenarioPolicyBase):
    """CRUD and activation checks for the Congress policy library."""

    @decorators.attr(type='smoke')
    def test_policy_library_basic_op(self):
        """Create, list and delete one library policy end to end."""
        response = self.os_admin.congress_client.list_library_policy()
        initial_state = response['results']
        # library policies are loaded on startup; an empty list means that
        # load failed
        self.assertGreater(
            len(initial_state), 0, 'library policy shows no policies, '
            'indicating failed load-on-startup.')

        test_policy = {
            "name": "test_policy",
            "description": "test policy description",
            "kind": "nonrecursive",
            "abbreviation": "abbr",
            "rules": [{"rule": "p(x) :- q(x)", "comment": "test comment",
                       "name": "test name"},
                      {"rule": "p(x) :- q2(x)", "comment": "test comment2",
                       "name": "test name2"}]
        }
        response = self.os_admin.congress_client.create_library_policy(
            test_policy)
        policy_id = response['id']
        test_policy['id'] = policy_id

        def delete_if_found(id_):
            # cleanup helper tolerant of the test having deleted it already
            try:
                self.os_admin.congress_client.delete_library_policy(id_)
            except exceptions.NotFound:
                pass

        self.addCleanup(delete_if_found, policy_id)

        response = self.os_admin.congress_client.list_library_policy()
        new_state = response['results']
        self.assertEqual(len(initial_state) + 1, len(new_state),
                         'new library policy not reflected in list results')
        self.assertIn(test_policy, new_state,
                      'new library policy not reflected in list results')

        self.os_admin.congress_client.delete_library_policy(policy_id)

        response = self.os_admin.congress_client.list_library_policy()
        new_state = response['results']
        self.assertEqual(len(initial_state), len(new_state),
                         'library policy delete not reflected in list results')
        self.assertNotIn(test_policy, new_state,
                         'library policy delete not reflected in list results')

    @decorators.attr(type='smoke')
    def test_create_library_policies(self):
        '''test the library policies by loading into policy engine'''

        # list library policies (by name) to skip in this test, perhaps
        # because it depends on datasources not available in gate
        skip_names_list = []

        response = self.os_admin.congress_client.list_library_policy()
        library_policies = response['results']

        for library_policy in library_policies:
            if library_policy['name'] not in skip_names_list:
                # activate each library policy in the engine; deactivate
                # again on cleanup
                resp = self.os_admin.congress_client.create_policy(
                    body=None, params={'library_policy': library_policy['id']})
                self.assertEqual(resp.response['status'], '201',
                                 'Policy activation failed')
                self.addCleanup(self.os_admin.congress_client.delete_policy,
                                resp['id'])
class TestCongressDataSources(manager_congress.ScenarioPolicyBase):
    """Sanity checks that every loaded Congress datasource is healthy."""

    @classmethod
    def skip_checks(cls):
        super(TestCongressDataSources, cls).skip_checks()
        reachable = (CONF.network.project_networks_reachable
                     or CONF.network.public_network_id)
        if not reachable:
            cls.enabled = False
            raise cls.skipException(
                'Either project_networks_reachable must be "true", or '
                'public_network_id must be defined.')

    def test_all_loaded_datasources_are_initialized(self):
        client = self.os_admin.congress_client

        @helper.retry_on_exception
        def _all_initialized():
            # every datasource's status must report initialized == 'True'
            # (the status API returns string values)
            listing = client.list_datasources()
            return all(
                client.list_datasource_status(ds['id'])['initialized'] ==
                'True'
                for ds in listing['results'])

        converged = test_utils.call_until_true(
            func=_all_initialized, duration=100, sleep_for=5)
        if not converged:
            raise exceptions.TimeoutException(
                "Data did not converge in time or failure in server")

    def test_all_datasources_have_tables(self):
        client = self.os_admin.congress_client

        @helper.retry_on_exception
        def _all_have_tables():
            # NOTE(arosen): a datasource exposing no tables indicates a
            # broken driver, so require a non-empty table listing for each.
            listing = client.list_datasources()
            return all(
                client.list_datasource_tables(ds['id'])['results']
                for ds in listing['results'])

        converged = test_utils.call_until_true(
            func=_all_have_tables, duration=100, sleep_for=5)
        if not converged:
            raise exceptions.TimeoutException(
                "Data did not converge in time or failure in server")

View File

@ -21,7 +21,6 @@ classifier =
[files]
packages =
congress
congress_tempest_tests
antlr3runtime/Python/antlr3
antlr3runtime/Python3/antlr3
data_files =
@ -36,7 +35,6 @@ setup-hooks =
autodoc_index_modules = True
autodoc_exclude_modules =
congress.db.migration.alembic_migrations.*
congress_tempest_tests.*
thirdparty.*
antlr3runtime.*
congress.datalog.Python2.*
@ -61,10 +59,6 @@ console_scripts =
congress-db-manage = congress.db.migration.cli:main
congress-cfg-validator-agt = congress.cfg_validator.agent.agent:main
tempest.test_plugins =
congress_tests = congress_tempest_tests.plugin:CongressTempestPlugin
[build_sphinx]
all_files = 1
build-dir = doc/build