Enrich functional test suite

This beefs up the functional test suite to do some tests on a
tenant router.  The test can now create one-off tenants to be used
in the tests.  The new tests act entirely on behalf of the test
router and attempt to only use admin-level things when required.

Partially-Implements: blueprint ci-updates-mitaka

Change-Id: I26fa70f877522f09075dc87583f2359cc0dcaf41
This commit is contained in:
Adam Gandelman 2015-09-02 18:53:24 -07:00
parent 6cf7784f51
commit 84b0928c4d
12 changed files with 744 additions and 143 deletions

2
.gitignore vendored
View File

@ -34,7 +34,7 @@ AUTHORS
ChangeLog
orchestrator.ini.sample
test.conf
astara/test/functional/test.conf
*.swp

View File

@ -47,11 +47,12 @@ def _get_proxyless_session():
return s
def is_alive(host, port):
def is_alive(host, port, timeout=None):
timeout = timeout or cfg.CONF.alive_timeout
path = ASTARA_BASE_PATH + 'firewall/rules'
try:
s = _get_proxyless_session()
r = s.get(_mgt_url(host, port, path), timeout=cfg.CONF.alive_timeout)
r = s.get(_mgt_url(host, port, path), timeout=timeout)
if r.status_code == 200:
return True
except Exception as e:

View File

@ -1,109 +1,549 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import mock
import netaddr
import os
import six
import subprocess
import testtools
import time
from oslo_config import cfg
from oslo_log import log as logging
from astara.api import astara_client
from novaclient import client as _novaclient
from keystoneclient.v2_0 import client as _keystoneclient
from neutronclient.v2_0 import client as _neutronclient
from novaclient import client as _novaclient
from keystoneclient import exceptions as ksc_exceptions
from neutronclient.common import exceptions as neutron_exceptions
from tempest_lib.common.utils import data_utils
from astara.test.functional import config
DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), 'test.conf')
DEFAULT_ACTIVE_TIMEOUT = 340
DEFAULT_DELETE_TIMEOUT = 60
config.register_opts()
CONF = cfg.CONF
logging.register_options(CONF)
LOG = None
def parse_config():
    """Load the functional test configuration and configure logging.

    The config file path may be overridden via the ASTARA_TEST_CONFIG
    environment variable (the legacy AKANDA_TEST_CONFIG variable is
    still honored for backward compatibility; _get_config() elsewhere
    in this module already uses the ASTARA_* name).
    """
    config_file = os.environ.get(
        'ASTARA_TEST_CONFIG',
        os.environ.get('AKANDA_TEST_CONFIG', DEFAULT_CONFIG))
    cfg.CONF(
        [], project='astara-orchestrator-functional',
        default_config_files=[config_file])
    # Quiet down the chattier client libraries.
    logging.set_defaults(default_log_levels=[
        'paramiko.transport=INFO',
        'neutronclient=WARN',
        'keystoneclient=WARN',
    ])
    logging.setup(CONF, 'astara_functional')
    # LOG is created lazily so it picks up the logging config set above.
    global LOG
    LOG = logging.getLogger(__name__)
class ClientManager(object):
    """A client manager using specified credentials.

    Keystone, neutron and nova client handles are constructed lazily on
    first property access and cached for the lifetime of the manager.
    """
    def __init__(self, username, password, tenant_name, auth_url):
        parse_config()
        self.username = username
        self.password = password
        self.tenant_name = tenant_name
        self.auth_url = auth_url
        # Lazily-created client handles.
        self._keystoneclient = None
        self._neutronclient = None
        self._novaclient = None

    @property
    def keystoneclient(self):
        """Keystone v2 client for these credentials."""
        if self._keystoneclient is None:
            self._keystoneclient = _keystoneclient.Client(
                username=self.username,
                password=self.password,
                tenant_name=self.tenant_name,
                auth_url=self.auth_url
            )
        return self._keystoneclient

    @property
    def neutronclient(self):
        """Neutron v2 client for these credentials."""
        if self._neutronclient is None:
            self._neutronclient = _neutronclient.Client(
                username=self.username,
                password=self.password,
                tenant_name=self.tenant_name,
                auth_url=self.auth_url,
                auth_system='keystone',
            )
        return self._neutronclient

    @property
    def novaclient(self):
        """Nova client for these credentials."""
        if self._novaclient is None:
            self._novaclient = _novaclient.Client(
                version=2,
                username=self.username,
                api_key=self.password,
                project_id=self.tenant_name,
                auth_url=self.auth_url,
                auth_system='keystone',
                auth_plugin='password',
            )
        return self._novaclient

    @property
    def tenant_id(self):
        """Tenant ID resolved via the keystone client."""
        return self.keystoneclient.tenant_id
class ApplianceServerNotFound(Exception):
    """Raised when no Nova backing instance can be located for a resource."""


class ApplianceServerTimeout(Exception):
    """Raised when waiting on a backing instance exceeds its timeout."""
class AdminClientManager(ClientManager):
    """A client manager using credentials loaded from test.conf, which
    are assumed to be admin.
    """
    def __init__(self):
        parse_config()
        super(AdminClientManager, self).__init__(
            username=CONF.os_username,
            password=CONF.os_password,
            tenant_name=CONF.os_tenant_name,
            auth_url=CONF.os_auth_url,
        )

    def get_router_appliance_server(self, router_uuid, retries=10,
                                    wait_for_active=False):
        """Return the Nova server object backing a router.

        :param router_uuid: UUID of the neutron router resource
        :param retries: number of 1-second polls while locating the server
        :param wait_for_active: if True, also poll (up to
            CONF.appliance_active_timeout sec) until the server is ACTIVE
        :returns: a novaclient server object
        :raises ApplianceServerNotFound: if no server could be located
        :raises ApplianceServerTimeout: if the server never became ACTIVE
        """
        LOG.debug(
            'Looking for nova backing instance for resource %s',
            router_uuid)
        for i in six.moves.range(retries):
            # Appliance servers are owned by the service tenant and carry
            # the router UUID in their name.
            service_instance = \
                [instance for instance in
                 self.novaclient.servers.list(
                     search_opts={
                         'all_tenants': 1,
                         'tenant_id': CONF.service_tenant_id}
                 ) if router_uuid in instance.name]
            if service_instance:
                service_instance = service_instance[0]
                LOG.debug(
                    'Found backing instance for resource %s: %s',
                    router_uuid, service_instance)
                break
            LOG.debug('Backing instance not found, will retry %s/%s',
                      i, retries)
            time.sleep(1)
        else:
            raise ApplianceServerNotFound(
                'Could not get nova server for router %s' % router_uuid)

        if wait_for_active:
            LOG.debug('Waiting for backing instance %s to become ACTIVE',
                      service_instance)
            for i in six.moves.range(CONF.appliance_active_timeout):
                service_instance = self.novaclient.servers.get(
                    service_instance.id)
                if service_instance.status == 'ACTIVE':
                    LOG.debug('Instance %s status==ACTIVE', service_instance)
                    return service_instance
                LOG.debug('Instance %s status==%s, will wait',
                          service_instance, service_instance.status)
                time.sleep(1)
            # BUG FIX: the original interpolated two '%s' placeholders with
            # a single argument, raising TypeError instead of the intended
            # ApplianceServerTimeout.
            raise ApplianceServerTimeout(
                'Timed out waiting for backing instance of %s to become '
                'ACTIVE' % router_uuid)
        else:
            return service_instance
class TestTenant(object):
    """A disposable keystone tenant + user used to run tests against.

    The tenant and user are created on instantiation; cleanUp() destroys
    everything this object created, including neutron resources.
    """
    def __init__(self):
        parse_config()
        self.username = data_utils.rand_name(name='user', prefix='akanda')
        self.user_id = None
        self.password = data_utils.rand_password()
        self.tenant_name = data_utils.rand_name(name='tenant', prefix='akanda')
        self.tenant_id = None

        self._admin_clients = AdminClientManager()
        self._admin_ks_client = self._admin_clients.keystoneclient
        self.auth_url = self._admin_ks_client.auth_url

        # create the tenant before creating its clients.
        self._create_tenant()

        self.clients = ClientManager(self.username, self.password,
                                     self.tenant_name, self.auth_url)
        self._subnets = []
        self._routers = []

    def _create_tenant(self):
        """Create the keystone tenant and a user belonging to it."""
        tenant = self._admin_ks_client.tenants.create(self.tenant_name)
        self.tenant_id = tenant.id
        user = self._admin_ks_client.users.create(name=self.username,
                                                  password=self.password,
                                                  tenant_id=self.tenant_id)
        self.user_id = user.id
        LOG.debug('Created new test tenant: %s (%s)',
                  self.tenant_id, self.user_id)

    def setup_networking(self):
        """Create a network + subnet for the tenant. Also creates a router
        if required, and attaches the subnet to it.

        :returns: a (network dict, router dict) tuple
        """
        # NOTE(adam_g): I didn't expect simply creating a network
        # to also create a subnet and router automatically, but this
        # does?
        net_body = {
            'network': {
                'name': data_utils.rand_name(name='network', prefix='ak'),
                'admin_state_up': False,
                'tenant_id': self.tenant_id
            }}
        LOG.debug('Creating network: %s', net_body)
        network = self.clients.neutronclient.create_network(net_body)
        network = network.get('network')
        if not network:
            raise Exception('Failed to create default tenant network')
        LOG.debug('Created network: %s', network)

        if not CONF.astara_auto_add_resources:
            # astara-neutron is not auto-creating resources; build the
            # subnet + router by hand and attach them.
            addr = netaddr.IPNetwork(CONF.test_subnet_cidr)
            subnet_body = {
                'subnet': {
                    'name': data_utils.rand_name(name='subnet', prefix='ak'),
                    'network_id': network['id'],
                    'cidr': CONF.test_subnet_cidr,
                    'ip_version': addr.version,
                }
            }
            LOG.debug('Creating subnet: %s', subnet_body)
            subnet = self.clients.neutronclient.create_subnet(
                body=subnet_body)['subnet']
            LOG.debug('Created subnet: %s', subnet)

            router_body = {
                'router': {
                    'name': data_utils.rand_name(name='router', prefix='ak'),
                    'admin_state_up': True,
                    'tenant_id': self.tenant_id,
                }
            }
            LOG.debug('Creating router: %s', router_body)
            router = self.clients.neutronclient.create_router(
                body=router_body)['router']
            LOG.debug('Created router: %s', router)

            LOG.debug(
                'Attaching interface on subnet %s to router %s',
                subnet['id'], router['id'])
            self.clients.neutronclient.add_interface_router(
                router['id'], {'subnet_id': subnet['id']}
            )
            LOG.debug(
                'Attached interface on subnet %s to router %s',
                subnet['id'], router['id'])
        else:
            # routers report as ACTIVE initially (LP: #1491673)
            time.sleep(2)
            LOG.debug('Waiting for astara auto-created router')
            for i in six.moves.range(CONF.appliance_active_timeout):
                routers = self.clients.neutronclient.list_routers()
                routers = routers.get('routers')
                if routers:
                    router = routers[0]
                    LOG.debug('Found astara auto-created router: %s', router)
                    break
                LOG.debug(
                    'Still waiting for auto-created router. %s/%s',
                    i, CONF.appliance_active_timeout)
                time.sleep(1)
            else:
                raise Exception('Timed out waiting for default router.')

        # routers report as ACTIVE initially (LP: #1491673)
        time.sleep(2)
        return network, router

    def _wait_for_backing_instance_delete(self, resource_id):
        """Block until the Nova server backing resource_id is gone.

        :raises ApplianceServerTimeout: if still present after
            DEFAULT_DELETE_TIMEOUT seconds.
        """
        LOG.debug(
            'Waiting on deletion of backing instance for resource %s',
            resource_id)
        for i in six.moves.range(DEFAULT_DELETE_TIMEOUT):
            try:
                self._admin_clients.get_router_appliance_server(
                    resource_id, retries=1)
            except ApplianceServerNotFound:
                LOG.debug('Backing instance for resource %s deleted',
                          resource_id)
                return
            LOG.debug(
                'Still waiting for deletion of backing instance for %s'
                ' , will wait (%s/%s)',
                resource_id, i, DEFAULT_DELETE_TIMEOUT)
            time.sleep(1)
        m = ('Timed out waiting on deletion of backing instance for %s '
             'after %s sec.' % (resource_id, DEFAULT_DELETE_TIMEOUT))
        LOG.debug(m)
        raise ApplianceServerTimeout(m)

    def _wait_for_neutron_delete(self, thing, ids):
        """Block until every neutron resource in ids 404s on show().

        :param thing: resource type name, e.g. 'router', 'port', 'subnet'
        :param ids: iterable of resource UUIDs
        """
        show = getattr(self.clients.neutronclient, 'show_' + thing)
        attempt = 0
        max_attempts = 10
        for i in ids:
            LOG.debug('Waiting for deletion of %s %s', thing, i)
            while True:
                try:
                    show(i)
                except neutron_exceptions.NeutronClientException as e:
                    if e.status_code == 404:
                        LOG.debug('Deletion of %s %s complete', thing, i)
                        break
                if attempt == max_attempts:
                    raise Exception(
                        'Timed out waiting for deletion of %s %s after %s sec.'
                        % (thing, i, max_attempts))
                LOG.debug(
                    'Still waiting for deletion of %s %s, will wait (%s/%s)',
                    thing, i, attempt, max_attempts)
                attempt += 1
                time.sleep(1)

        # also wait for nova backing instance to delete after routers
        if thing in ['router']:
            [self._wait_for_backing_instance_delete(i) for i in ids]

    def cleanup_neutron(self):
        """Clean tenant environment of neutron resources"""
        LOG.debug('Cleaning up created neutron resources')
        router_interface_ports = [
            p for p in self.clients.neutronclient.list_ports()['ports']
            if 'router_interface' in p['device_owner']]
        for rip in router_interface_ports:
            LOG.debug('Deleting router interface port: %s', rip)
            # BUG FIX: detach the port currently being iterated -- the
            # original always passed router_interface_ports[0]['id'].
            self.clients.neutronclient.remove_interface_router(
                rip['device_id'],
                body=dict(port_id=rip['id']))

        astara_router_ports = []
        router_ids = [
            r['id'] for r in
            self.clients.neutronclient.list_routers().get('routers')
        ]
        # Iterate over copies throughout: the original removed items from
        # the very list it was iterating, which skips elements.
        for rid in list(router_ids):
            # Collect the service ports astara created for this router so
            # they can be force-deleted later.
            for port_type in ['MGT', 'VRRP']:
                name = 'ASTARA:%s:%s' % (port_type, rid)
                astara_router_ports += [
                    p['id'] for p in
                    self._admin_clients.neutronclient.list_ports(
                        name=name).get('ports')]

            LOG.debug('Deleting router %s' % rid)
            try:
                # BUG FIX: the original called delete_router(r['id']) using
                # a stale comprehension variable instead of rid.
                self.clients.neutronclient.delete_router(rid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    router_ids.remove(rid)
                else:
                    raise e
        self._wait_for_neutron_delete('router', router_ids)

        time.sleep(2)
        port_ids = [
            p['id'] for p in
            self.clients.neutronclient.list_ports().get('ports')]
        for pid in list(port_ids):
            LOG.debug('Deleting port: %s', pid)
            try:
                self.clients.neutronclient.delete_port(pid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    port_ids.remove(pid)
                else:
                    raise e
        self._wait_for_neutron_delete('port', port_ids)

        subnet_ids = [
            s['id']
            for s in self.clients.neutronclient.list_subnets().get('subnets')]
        for sid in list(subnet_ids):
            LOG.debug('Deleting subnet: %s', sid)
            try:
                self.clients.neutronclient.delete_subnet(sid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    subnet_ids.remove(sid)
                else:
                    raise e
        self._wait_for_neutron_delete('subnet', subnet_ids)

        # need to make sure the vrrp and mgt ports get deleted
        # in time before the delete_network()
        for p in list(astara_router_ports):
            try:
                self._admin_clients.neutronclient.delete_port(p)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    astara_router_ports.remove(p)
                else:
                    raise e
        self._wait_for_neutron_delete('port', astara_router_ports)

        networks = self.clients.neutronclient.list_networks().get('networks')
        net_ids = [
            n['id'] for n in networks if n['tenant_id'] == self.tenant_id]
        for nid in list(net_ids):
            LOG.debug('Deleting network: %s', nid)
            try:
                self.clients.neutronclient.delete_network(nid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    net_ids.remove(nid)
                else:
                    raise e
        self._wait_for_neutron_delete('network', net_ids)

    def cleanUp(self):
        """Destroy everything created: neutron resources first, then the
        keystone user and tenant."""
        self.cleanup_neutron()
        self._admin_ks_client.users.delete(self.user_id)
        self._admin_ks_client.tenants.delete(self.tenant_id)
class AstaraFunctionalBase(testtools.TestCase):
_test_tenants = []
def setUp(self):
super(AstaraFunctionalBase, self).setUp()
self.config = self._get_config()
self.ak_cfg = mock.patch.object(astara_client.cfg, 'CONF').start()
self.ak_cfg.alive_timeout = 10
log_format = '%(asctime)s.%(msecs)03d ' + self.id() + ' %(message)s'
cfg.CONF.set_default('logging_default_format_string', log_format)
parse_config()
self.ak_client = astara_client
self.novaclient = _novaclient.Client(
version=2,
username=self.config['os_username'],
api_key=self.config['os_password'],
project_id=self.config['os_tenant_name'],
auth_url=self.config['os_auth_url'],
auth_system='keystone',
auth_plugin='password',
)
self.neutronclient = _neutronclient.Client(
username=self.config['os_username'],
password=self.config['os_password'],
tenant_name=self.config['os_tenant_name'],
auth_url=self.config['os_auth_url'],
auth_strategy='keystone',
)
self.admin_clients = AdminClientManager()
self._management_address = None
def _get_config(self):
config_file = os.environ.get('ASTARA_TEST_CONFIG',
DEFAULT_CONFIG)
config = ConfigParser.SafeConfigParser()
if not config.read(config_file):
self.skipTest('Skipping, no test config found @ %s' %
config_file)
@classmethod
def setUpClass(cls):
cls._test_tenants = []
req_conf_settings = ['os_auth_url', 'os_username', 'os_password',
'os_tenant_name', 'service_tenant_name',
'service_tenant_id', 'appliance_api_port',
'astara_test_router_uuid']
out = {}
for c in req_conf_settings:
try:
out[c] = config.get('functional', c)
except ConfigParser.NoOptionError:
out[c] = None
missing = [k for k, v in out.items() if not v]
if missing:
self.fail('Missing required setting in test.conf (%s)'
(config_file, ','.join(missing)))
@classmethod
def tearDownClass(cls):
try:
[t.cleanUp() for t in cls._test_tenants]
except ksc_exceptions.NotFound:
pass
opt_conf_settings = {
'appliance_active_timeout': DEFAULT_ACTIVE_TIMEOUT,
}
for setting, default in opt_conf_settings.items():
try:
out[setting] = config.get('functional', setting)
except ConfigParser.NoOptionError:
out[setting] = default
return out
@classmethod
def get_tenant(cls):
"""Creates a new test tenant
@property
def management_address(self):
This tenant is assumed to be empty of any cloud resources
and will be destroyed on test class teardown.
"""
tenant = TestTenant()
cls._test_tenants.append(tenant)
return tenant
def get_router_appliance_server(self, router_uuid, retries=10,
                                wait_for_active=False):
    """Returns a Nova server object for router.

    Thin delegate to the admin client manager's lookup.
    """
    return self.admin_clients.get_router_appliance_server(
        router_uuid, retries=retries, wait_for_active=wait_for_active)
def get_management_address(self, router_uuid):
LOG.debug('Getting management address for resource %s', router_uuid)
if self._management_address:
return self._management_address['addr']
# TODO(adam_g): Deal with multiple service instances
service_instance = \
[instance for instance in self.novaclient.servers.list(
search_opts={'all_tenants': 1,
'tenant_id': self.config['service_tenant_id']}
) if instance.name.startswith('ak-')][0]
service_instance = self.get_router_appliance_server(router_uuid)
try:
self._management_address = service_instance.addresses['mgt'][0]
except KeyError:
self.fail('"mgt" port not found on service instance %s (%s)' %
(service_instance.id, service_instance.name))
raise Exception(
'"mgt" port not found on service instance %s (%s)' %
(service_instance.id, service_instance.name))
LOG.debug('Got management address for resource %s', router_uuid)
return self._management_address['addr']
def assert_router_is_active(self, router_uuid=None):
if not router_uuid:
router_uuid = self.config['astara_test_router_uuid']
i = 0
router = self.neutronclient.show_router(router_uuid)['router']
while router['status'] != 'ACTIVE':
if i >= int(self.config['appliance_active_timeout']):
def assert_router_is_active(self, router_uuid):
LOG.debug('Waiting for resource %s to become ACTIVE', router_uuid)
for i in six.moves.range(CONF.appliance_active_timeout):
res = self.admin_clients.neutronclient.show_router(router_uuid)
router = res['router']
if router['status'] == 'ACTIVE':
LOG.debug('Router %s ACTIVE after %s sec.', router_uuid, i)
return
service_instance = self.get_router_appliance_server(
router_uuid)
if service_instance and service_instance.status == 'ERROR':
raise Exception(
'Timed out waiting for router %s to become ACTIVE, '
'current status=%s' % (router_uuid, router['status']))
'Backing instance %s for router %s in ERROR state',
service_instance.id, router_uuid)
LOG.debug(
'Resource %s not active. Status==%s, will wait, %s/%s sec.',
router_uuid, router['status'], i,
CONF.appliance_active_timeout)
time.sleep(1)
router = self.neutronclient.show_router(router_uuid)['router']
i += 1
raise Exception(
'Timed out waiting for router %s to become ACTIVE, '
'current status=%s' % (router_uuid, router['status']))
def ping_router_mgt_address(self, router_uuid):
    """Ping the management address of the appliance backing a router.

    :param router_uuid: UUID of the neutron router resource
    :raises Exception: if the ping command cannot be run or exits non-zero
    """
    server = self.get_router_appliance_server(router_uuid)
    mgt_interface = server.addresses['mgt'][0]
    # Choose ping/ping6 based on the IP version nova reports for the port.
    program = {4: 'ping', 6: 'ping6'}
    cmd = [program[mgt_interface['version']], '-c5', mgt_interface['addr']]
    LOG.debug('Pinging resource %s: %s', router_uuid, ' '.join(cmd))
    try:
        subprocess.check_call(cmd)
    except (OSError, subprocess.CalledProcessError):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only what check_call raises.
        raise Exception('Failed to ping router with command: %s' % cmd)

View File

@ -0,0 +1,64 @@
# Copyright (c) 2016 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
# Options consumed by the functional test suite; they live in the
# [functional] section of test.conf (see list_opts/register_opts).
functional_test_opts = [
    cfg.StrOpt(
        'os_auth_url', required=True,
        help='Keystone auth URL'),
    cfg.StrOpt(
        'os_username', required=True,
        help='Username of admin user'),
    cfg.StrOpt(
        'os_password', required=True,
        help='Password of admin user'),
    cfg.StrOpt(
        'os_tenant_name', required=True,
        help='Tenant name of admin user'),
    cfg.StrOpt(
        'service_tenant_id', required=True,
        help='Tenant ID for the astara service user'),
    cfg.StrOpt(
        'service_tenant_name', required=True,
        help='Tenant name of the astara service user'),
    cfg.StrOpt(
        'appliance_api_port', required=True,
        help='The port on which appliance API servers listen'),
    cfg.BoolOpt(
        'astara_auto_add_resources', required=False, default=True,
        help='Whether astara-neutron is configured to auto-add resources'),
    cfg.IntOpt(
        'appliance_active_timeout', required=False, default=340,
        help='Timeout (sec) for an appliance to become ACTIVE'),
    cfg.StrOpt(
        # FIX: this option previously had no help text, producing a bare
        # "(string value)" entry in the generated sample config.
        'test_subnet_cidr', required=False, default='10.1.1.0/24',
        help='CIDR to use when creating the test tenant subnet'),
    cfg.IntOpt(
        # FIX: reworded the previously garbled help string.
        'health_check_period', required=False, default=60,
        help='Period (sec) of the health check that astara-orchestrator '
             'is configured to use'),
]
def list_opts():
    """Return (section, options) pairs for oslo's sample-config generator."""
    return [
        ('functional',
         itertools.chain(functional_test_opts))]


def register_opts():
    """Register the functional-test options on the global CONF object."""
    cfg.CONF.register_opts(functional_test_opts)

View File

@ -1,17 +1,42 @@
[DEFAULT]
[functional]
# OpenStack credentials for the service user
os_auth_url=http://127.0.0.1:5000/v2.0
os_username=admin
os_password=secretadmin
os_tenant_name=admin
service_tenant_name=service
service_tenant_id=80095e2039db4af0a88351d6360c1977
# The configured appliance API port
appliance_api_port=5000
#
# From astara.test.functional
#
# The UUID of a running astara router that will be used for running
# tests against. Devstack creates this for you but you may have one
# that you wish to use instead. If not supplied here, tools/run_functional.sh
# will attempt to find it for you.
astara_test_router_uuid=650da79d-30ee-460f-bf91-8b7e04a5a5f6
# Password of admin user (string value)
#os_password = <None>
# Tenant ID for the astara service user (string value)
#service_tenant_id = <None>
# Tenant name of admin user (string value)
#os_tenant_name = <None>
# (string value)
#test_subnet_cidr = 10.1.1.0/24
# Whether astara-neutron is configured to auto-add resources (boolean value)
#astara_auto_add_resources = true
# Keystone auth URL (string value)
#os_auth_url = <None>
# Username of admin user (string value)
#os_username = <None>
# Tenant name of the astara service user (string value)
#service_tenant_name = <None>
# Timeout (sec) for an appliance to become ACTIVE (integer value)
#appliance_active_timeout = 340
# The port on which appliance API servers listen (string value)
#appliance_api_port = <None>
# Period (sec) of the health check that astara-orchestrator is configured
# to use (integer value)
#health_check_period = 60

View File

@ -1,22 +0,0 @@
from astara.test.functional import base
class AstaraApplianceInstanceTest(base.AstaraFunctionalBase):
    """Basic tests to ensure a service instance and its associated router is
    alive and well.
    """
    def setUp(self):
        super(AstaraApplianceInstanceTest, self).setUp()
        # ensure the devstack spawned router instance becomes active before
        # starting to run any test cases. this in itself is a test that
        # devstack produced a functional router.
        self.assert_router_is_active()

    def test_appliance_is_alive(self):
        # Hit the appliance REST API on its management address and expect
        # a healthy response.
        self.assertTrue(
            self.ak_client.is_alive(
                host=self.management_address,
                port=self.config['appliance_api_port'],
            ),
        )

View File

@ -0,0 +1,96 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_config import cfg
from oslo_log import log as logging
from astara.test.functional import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class TestAstaraRouter(base.AstaraFunctionalBase):
    """Functional tests exercising a tenant router appliance end-to-end."""

    @classmethod
    def setUpClass(cls):
        super(TestAstaraRouter, cls).setUpClass()
        # A fresh throwaway tenant provides the network/router under test.
        cls.tenant = cls.get_tenant()
        cls.neutronclient = cls.tenant.clients.neutronclient
        cls.network, cls.router = cls.tenant.setup_networking()

    def setUp(self):
        super(TestAstaraRouter, self).setUp()
        self.assert_router_is_active(self.router['id'])
        # refresh router ref now that its active
        router = self.neutronclient.show_router(self.router['id'])
        self.router = router['router']

    def test_router_recovery(self):
        """
        Test that creation of network/subnet/router results in a
        correctly plugged appliance, and that manually destroying the
        Nova instance results in a new appliance being booted.
        """
        # for each subnet that was created during setup, ensure we have a
        # router interface added
        ports = self.neutronclient.list_ports()['ports']
        subnets = self.neutronclient.list_subnets(
            network_id=self.network['id'])
        subnets = subnets['subnets']
        self.assertEqual(len(ports), len(subnets))
        for port in ports:
            self.assertEqual(port['device_owner'], 'network:router_interface')
            self.assertEqual(port['device_id'], self.router['id'])
            self.assertEqual(
                sorted([subnet['id'] for subnet in subnets]),
                sorted([fip['subnet_id'] for fip in port['fixed_ips']])
            )

        self.ping_router_mgt_address(self.router['id'])

        # Ensure that if we destroy the nova instance, the RUG will rebuild
        # the router with a new instance.
        # This could live in a separate test case but it'd require the
        # above as setup, so just piggyback on it.
        old_server = self.get_router_appliance_server(self.router['id'])
        LOG.debug('Original server: %s', old_server)

        # NOTE(adam_g): In the gate, sometimes the appliance hangs on the
        # first config update and health checks get queued up behind the
        # hanging config update. If thats the case, we need to wait a while
        # before deletion for the first to timeout.
        time.sleep(30)
        LOG.debug('Deleting original nova server: %s', old_server.id)
        self.admin_clients.novaclient.servers.delete(old_server.id)
        LOG.debug('Waiting %s seconds for astara health check to tick',
                  CONF.health_check_period)
        time.sleep(CONF.health_check_period)

        # look for the new server, retry giving rug time to do its thing.
        new_server = self.get_router_appliance_server(
            self.router['id'], retries=60, wait_for_active=True)
        LOG.debug('Rebuilt new server found: %s', new_server)
        self.assertNotEqual(old_server.id, new_server.id)

        # routers report as ACTIVE initially (LP: #1491673)
        time.sleep(2)
        self.assert_router_is_active(self.router['id'])
        self.ping_router_mgt_address(self.router['id'])

View File

@ -93,6 +93,12 @@ function configure_astara() {
iniset $ASTARA_CONF DEFAULT astara_mgt_service_port $ASTARA_MANAGEMENT_PORT
iniset $ASTARA_CONF DEFAULT api_listen $ASTARA_API_LISTEN
iniset $ASTARA_CONF DEFAULT api_port $ASTARA_API_PORT
iniset $ASTARA_CONF DEFAULT health_check_period 10
# NOTE(adam_g) When running in the gate on slow VMs, gunicorn workers in the appliance
# sometimes hang during config update and eventually timeout after 60s. Update
# config_timeout in the RUG to reflect that timeout.
iniset $ASTARA_CONF DEFAULT config_timeout 60
if [[ "$Q_AGENT" == "linuxbridge" ]]; then
iniset $ASTARA_CONF DEFAULT interface_driver "astara.common.linux.interface.BridgeInterfaceDriver"

View File

@ -0,0 +1,5 @@
[DEFAULT]
output_file = astara/test/functional/test.conf.sample
wrap_width = 79
namespace = astara.test.functional

View File

@ -67,6 +67,7 @@ oslo.config.opts =
astara.pez = astara.opts:list_pez_opts
astara.router = astara.opts:list_router_opts
astara.loadbalancer = astara.opts:list_loadbalancer_opts
astara.test.functional = astara.test.functional.config:list_opts
[build_sphinx]
all_files = 1

View File

@ -10,6 +10,7 @@ iso8601>=0.1.9 # MIT
mox>=0.5.3 # Apache-2.0
testtools>=1.4.0 # MIT
fixtures>=1.3.1 # Apache-2.0/BSD
tempest-lib>=0.13.0 # Apache-2.0
# Doc requirements
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD

View File

@ -1,36 +1,20 @@
#!/bin/bash -xe
echo $LOGDIR
FUNC_TEST_DIR=$(dirname $0)/../astara/test/functional/
CONFIG_FILE=$FUNC_TEST_DIR/test.conf
LOGDIR=${LOGDIR:-$FUNC_TEST_DIR}
LOG_FILE=$LOGDIR/astara_functional.log
APPLIANCE_API_PORT=${APPLIANCE_API_PORT:-5000}
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
if [ -z "$SERVICE_TENANT_ID" ]; then
SERVICE_TENANT_ID="$(keystone tenant-list | grep $SERVICE_TENANT_NAME | awk '{ print $2 }')"
fi
# Functional tests require a test astara router be created prior to the test
# run. Devstack does this, but you may specify another here. If not specified,
# the ID of the devstack created router will be used.
ASTARA_TEST_ROUTER_UUID=${ASTARA_TEST_ROUTER_UUID:-''}
function find_router() {
# Find the UUID of the astara router created by devstack.
router=$(neutron router-list | grep "ak-" | awk '{ print $2 }')
if [ $(echo "$router" | wc -l) -gt 1 ]; then
echo "ERROR: Found multiple astara routers, cannot continue."
exit 1
elif [ -z "$router" ]; then
echo "ERROR: Could not locate astara router."
exit 1
fi
echo $router
}
cat <<END >$CONFIG_FILE
[functional]
appliance_active_timeout=480
[DEFAULT]
debug=True
use_stderr=False
use_syslog=False
os_auth_url=$OS_AUTH_URL
os_username=$OS_USERNAME
os_password=$OS_PASSWORD
@ -38,11 +22,11 @@ os_tenant_name=$OS_TENANT_NAME
service_tenant_name=$SERVICE_TENANT_NAME
service_tenant_id=$SERVICE_TENANT_ID
appliance_api_port=$APPLIANCE_API_PORT
# Defaults for the gate
health_check_period=10
appliance_active_timeout=480
log_file=/opt/stack/logs/astara_functional.log
END
if [ -z "$ASTARA_TEST_ROUTER_UUID" ]; then
ASTARA_TEST_ROUTER_UUID="$(find_router)"
fi
echo "astara_test_router_uuid=$ASTARA_TEST_ROUTER_UUID" >>$CONFIG_FILE
tox -e functional