Add gate for Direct L7-Proxy to Octavia

Depends-On: https://review.openstack.org/569881
Change-Id: I5d02d897b1b8fac69606ec46acc903fc9e826de8
This commit is contained in:
Adam Harwell 2018-04-13 06:08:23 +09:00
parent 723228c2e1
commit 880073d669
26 changed files with 326 additions and 729 deletions

View File

@ -0,0 +1,36 @@
# HAProxy configuration used by the neutron-lbaas devstack plugin: it fronts
# the neutron API and redirects LBaaS v2 calls (/v2.0/lbaas) to the Octavia
# API, while all other neutron API traffic passes through to neutron itself.
global
daemon
log /dev/log local0
log /dev/log local1 notice
defaults
log global
retries 3
option redispatch
timeout connect 5000
timeout client 50000
timeout server 50000
# Frontend bound on the alternate API port; the devstack plugin substitutes
# the NEUTRON_ALTERNATE_API_PORT placeholder with a real port via sed.
frontend neutron-frontend-api
option httplog
bind 0.0.0.0:NEUTRON_ALTERNATE_API_PORT
mode http
acl url_lbaas path_beg /v2.0/lbaas
use_backend octavia-backend-api if url_lbaas
default_backend neutron-backend-api
backend octavia-backend-api
mode http
balance roundrobin
option forwardfor
# Rewrite the request line: /v2.0/lbaas/... -> /load-balancer/v2.0/...
# NOTE(review): reqrep was deprecated in HAProxy 1.9 and removed in 2.x in
# favor of "http-request replace-path" — confirm the target haproxy version.
reqrep ^([^\ :]*)\ /v2.0/lbaas/(.*) \1\ /load-balancer/v2.0/\2
server octavia-1 127.0.0.1:80 weight 1
backend neutron-backend-api
mode http
balance roundrobin
option forwardfor
# the devstack plugin will add an entry here looking like:
# server neutron-1 <IP>:<PORT> weight 1

View File

@ -75,6 +75,25 @@ function neutron_lbaas_configure_agent {
fi
}
function configure_neutron_api_haproxy {
    # Stand up an haproxy in front of the neutron API so /v2.0/lbaas requests
    # are redirected to the Octavia API (see devstack/etc/neutron/haproxy.cfg).
    echo "Configuring neutron API haproxy for l7"
    install_package haproxy
    cp ${NEUTRON_LBAAS_DIR}/devstack/etc/neutron/haproxy.cfg ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
    # Substitute the bind-port placeholder in the shipped template.
    sed -i.bak "s/NEUTRON_ALTERNATE_API_PORT/${NEUTRON_ALTERNATE_API_PORT}/" ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
    # The template only carries a commented-out placeholder for the neutron
    # backend, so append the real server line. Allow the port to be overridden
    # from the environment; 9696 is the neutron API default.
    NEUTRON_API_PORT=${NEUTRON_API_PORT:-9696}
    echo " server neutron-1 ${HOST_IP}:${NEUTRON_API_PORT} weight 1" >> ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
    # Validate the generated config before launching the daemon.
    /usr/sbin/haproxy -c -f ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg
    run_process $NEUTRON_API_HAPROXY "/usr/sbin/haproxy -db -V -f ${NEUTRON_CONF_DIR}/lbaas-haproxy.cfg"
    # Repoint the keystone endpoint at the proxy. Use the configured alternate
    # port (not a hard-coded 9695) so the endpoint always matches the port the
    # haproxy frontend actually binds to.
    NEUTRON_ENDPOINT_ID=$(openstack endpoint list --service neutron -f value -c ID)
    openstack endpoint set --url "http://127.0.0.1:${NEUTRON_ALTERNATE_API_PORT}/" $NEUTRON_ENDPOINT_ID
}
function neutron_lbaas_generate_config_files {
# Uses oslo config generator to generate LBaaS sample configuration files
(cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh)
@ -129,11 +148,15 @@ if is_service_enabled $LBAAS_ANY; then
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring neutron-lbaas"
neutron_lbaas_generate_config_files
neutron_lbaas_configure_common
neutron_lbaas_configure_agent
if [[ "$PROXY_OCTAVIA" == "True" ]]; then
configure_neutron_api_haproxy
else
neutron_lbaas_generate_config_files
neutron_lbaas_configure_common
neutron_lbaas_configure_agent
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
elif [[ "$1" == "stack" && "$2" == "extra" && "$PROXY_OCTAVIA" != "True" ]]; then
# Initialize and start the LBaaS service
echo_summary "Initializing neutron-lbaas"
neutron_lbaas_start

View File

@ -35,3 +35,6 @@ SERVICES_LBAAS_CONF=$NEUTRON_CONF_DIR/$SERVICES_LBAAS_CONF_FILE
NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA=${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA:-"LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default"}
NEUTRON_LBAAS_SERVICE_PROVIDERV2=${NEUTRON_LBAAS_SERVICE_PROVIDERV2:-${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA}}
NEUTRON_ALTERNATE_API_PORT=9695
NEUTRON_API_HAPROXY="q-api-ha"

View File

@ -47,7 +47,7 @@ case $testtype in
if [ "$lbaasversion" = "lbaasv2" ]; then
case "$lbaasenv" in
"api"|"healthmonitor"|"listener"|"loadbalancer"|"member"|"minimal"|"pool")
"api"|"healthmonitor"|"listener"|"loadbalancer"|"member"|"minimal"|"pool"|"proxy_octavia")
testenv="apiv2"
;;
"scenario")

View File

@ -39,6 +39,13 @@ function _setup_octavia {
export DEVSTACK_LOCAL_CONFIG+="
enable_plugin octavia https://git.openstack.org/openstack/octavia
"
if [[ "$lbaasenv" == "proxy_octavia" ]]; then
export DEVSTACK_LOCAL_CONFIG+="
enable_service q-api-ha
export PROXY_OCTAVIA=True
export OCTAVIA_USE_LEGACY_RBAC=True
"
fi
# Use infra's cached version of the file
if [ -f /opt/stack/new/devstack/files/get-pip.py ]; then
export DEVSTACK_LOCAL_CONFIG+="

View File

@ -1,3 +1,7 @@
[[test-config|$TEMPEST_CONFIG]]
[lbaas]
test_with_noop = True
[[post-config|/etc/octavia/octavia.conf]]
[DEFAULT]
debug = True

View File

@ -30,4 +30,7 @@ lbaas_opts = [
cfg.ListOpt('session_persistence_types',
default=['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'],
help='Supported session persistence types for Neutron LBaaS'),
cfg.BoolOpt('test_with_noop', default=False,
help='Configures tempest to run with no-op drivers. Ignores '
'the operating_status during tests.'),
]

View File

@ -15,6 +15,7 @@
import time
from neutron_lib import constants as n_constants
from oslo_log import log as logging
from tempest.api.network import base
from tempest import config
@ -169,8 +170,6 @@ class BaseTestCase(base.BaseNetworkTest):
cls._wait_for_load_balancer_status(lb.get('id'))
cls._lbs_to_delete.append(lb.get('id'))
port = cls.ports_client.show_port(lb['vip_port_id'])
cls.ports.append(port['port'])
return lb
@classmethod
@ -204,6 +203,10 @@ class BaseTestCase(base.BaseNetworkTest):
timeout = 600
end_time = time.time() + timeout
lb = {}
# When running with no-op drivers there is no actual health to
# observe, so disable operating_status checks when running no-op.
if CONF.lbaas.test_with_noop:
operating_status = None
while time.time() < end_time:
try:
lb = cls.load_balancers_client.get_load_balancer(
@ -368,7 +371,12 @@ class BaseTestCase(base.BaseNetworkTest):
statuses = cls.load_balancers_client.get_load_balancer_status_tree(
load_balancer_id=load_balancer_id)
load_balancer = statuses['loadbalancer']
assert 'ONLINE' == load_balancer['operating_status']
# When running with no-op drivers there is no actual health to
# observe, so disable operating_status checks when running no-op.
if not CONF.lbaas.test_with_noop:
assert 'ONLINE' == load_balancer['operating_status']
assert 'ACTIVE' == load_balancer['provisioning_status']
if listener_ids:
@ -390,13 +398,16 @@ class BaseTestCase(base.BaseNetworkTest):
@classmethod
def _check_status_tree_thing(cls, actual_thing_ids, status_tree_things):
found_things = 0
status_tree_things = status_tree_things
assert len(actual_thing_ids) == len(status_tree_things)
for actual_thing_id in actual_thing_ids:
for status_tree_thing in status_tree_things:
if status_tree_thing['id'] == actual_thing_id:
assert 'ONLINE' == (
status_tree_thing['operating_status'])
# When running with no-op drivers there is no actual
# health to observe, so disable operating_status checks
# when running no-op.
if not CONF.lbaas.test_with_noop:
assert 'ONLINE' == (
status_tree_thing['operating_status'])
assert 'ACTIVE' == (
status_tree_thing['provisioning_status'])
found_things += 1
@ -410,6 +421,17 @@ class BaseTestCase(base.BaseNetworkTest):
)
return name
def _test_provisioning_status_if_exists(self, created_obj, get_obj):
    """Validate the provisioning_status transition between create and GET.

    Pops ``provisioning_status`` from both dicts so callers can compare the
    remaining fields for equality afterwards. A create response, if it
    carries the field at all, should be in a PENDING_* state, while a later
    GET should report ACTIVE. Also normalizes ``updated_at`` when only the
    GET response carries a timestamp.
    """
    status_at_create = created_obj.pop('provisioning_status', None)
    status_at_get = get_obj.pop('provisioning_status', None)
    if status_at_create:
        self.assertIn('PENDING_', status_at_create)
    if status_at_get:
        self.assertEqual(n_constants.ACTIVE, status_at_get)
    # The create response may have updated_at unset while the GET already
    # has one; blank it so dict equality checks do not trip on it.
    if not created_obj.get('updated_at') and get_obj.get('updated_at'):
        get_obj['updated_at'] = None
class BaseAdminTestCase(BaseTestCase):

View File

@ -74,6 +74,7 @@ class TestHealthMonitors(base.BaseAdminTestCase):
hm_tenant_id = hm.get('tenant_id')
self.assertEqual(admin_tenant_id, hm_tenant_id)
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_health_monitor_empty_tenant_id_field(self):
"""
@ -84,6 +85,7 @@ class TestHealthMonitors(base.BaseAdminTestCase):
type=self.hm_protocol, delay=3, max_retries=10,
timeout=5, pool_id=self.pool.get('id'), tenant_id="")
@decorators.skip_because(bug="1468457")
@decorators.attr(type='smoke')
def test_create_health_monitor_for_another_tenant_id_field(self):
"""Test with admin user create health monitor for another tenant id.

View File

@ -54,6 +54,14 @@ class TestHealthMonitors(base.BaseTestCase):
'max_retries': 10, 'timeout': 5,
'pool_id': cls.pool.get('id')}
def _prep_list_comparison(self, single, obj_list):
    """Strip volatile fields so a created HM compares equal to its listing."""
    single.pop('operating_status', None)
    for candidate in obj_list:
        candidate.pop('operating_status', None)
        # A fresh create may report updated_at=None while the list view
        # already carries a timestamp; normalize before comparing.
        if not single.get('updated_at') and candidate.get('updated_at'):
            candidate['updated_at'] = None
        self._test_provisioning_status_if_exists(single, candidate)
def test_list_health_monitors_empty(self):
hm_list = self.health_monitors_client.list_health_monitors()
self.assertEmpty(hm_list)
@ -61,6 +69,7 @@ class TestHealthMonitors(base.BaseTestCase):
def test_list_health_monitors_one(self):
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm_list = self.health_monitors_client.list_health_monitors()
self._prep_list_comparison(hm, hm_list)
self.assertIn(hm, hm_list)
@decorators.attr(type='smoke')
@ -81,22 +90,21 @@ class TestHealthMonitors(base.BaseTestCase):
timeout=2,
pool_id=new_pool.get('id'))
hm_list = self.health_monitors_client.list_health_monitors()
self._prep_list_comparison(hm1, hm_list)
self._prep_list_comparison(hm2, hm_list)
self.assertEqual(2, len(hm_list))
self.assertIn(hm1, hm_list)
self.assertIn(hm2, hm_list)
@decorators.attr(type='smoke')
def test_get_health_monitor(self):
def test_create_and_get_health_monitor(self):
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm_test = self.health_monitors_client.get_health_monitor(hm.get('id'))
self._test_provisioning_status_if_exists(hm, hm_test)
hm.pop('operating_status', None)
hm_test.pop('operating_status', None)
self.assertEqual(hm, hm_test)
@decorators.attr(type='smoke')
def test_create_health_monitor(self):
new_hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm = self.health_monitors_client.get_health_monitor(new_hm.get('id'))
self.assertEqual(new_hm, hm)
def test_create_health_monitor_missing_attribute(self):
self.assertRaises(ex.BadRequest, self._create_health_monitor,
type=self.hm_protocol, delay=3, max_retries=10,
@ -148,6 +156,9 @@ class TestHealthMonitors(base.BaseTestCase):
"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm_test = self.health_monitors_client.get_health_monitor(hm.get('id'))
self._test_provisioning_status_if_exists(hm, hm_test)
hm.pop('operating_status', None)
hm_test.pop('operating_status', None)
self.assertEqual(hm, hm_test)
self.assertTrue(hm_test.get('admin_state_up'))
@ -161,6 +172,9 @@ class TestHealthMonitors(base.BaseTestCase):
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm_test = self.health_monitors_client.get_health_monitor(hm.get('id'))
self._test_provisioning_status_if_exists(hm, hm_test)
hm.pop('operating_status', None)
hm_test.pop('operating_status', None)
self.assertEqual(hm, hm_test)
self.assertEqual('GET', hm_test.get('http_method'))
@ -173,6 +187,9 @@ class TestHealthMonitors(base.BaseTestCase):
raise self.skipException(msg)
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm_test = self.health_monitors_client.get_health_monitor(hm.get('id'))
self._test_provisioning_status_if_exists(hm, hm_test)
hm.pop('operating_status', None)
hm_test.pop('operating_status', None)
self.assertEqual(hm, hm_test)
self.assertEqual('/', hm_test.get('url_path'))
@ -186,9 +203,13 @@ class TestHealthMonitors(base.BaseTestCase):
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
hm_test = self.health_monitors_client.get_health_monitor(hm.get('id'))
self._test_provisioning_status_if_exists(hm, hm_test)
hm.pop('operating_status', None)
hm_test.pop('operating_status', None)
self.assertEqual(hm, hm_test)
self.assertEqual('200', hm_test.get('expected_codes'))
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_health_monitor_invalid_tenant_id(self):
"""Test create health monitor with invalid tenant_id"""
@ -357,50 +378,57 @@ class TestHealthMonitors(base.BaseTestCase):
max_retries=10, timeout=5,
pool_id=self.pool.get('id'))
max_retries = 1
new_hm = self._update_health_monitor(
hm.get('id'), max_retries=max_retries)
self._update_health_monitor(hm.get('id'), max_retries=max_retries)
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(max_retries, new_hm.get('max_retries'))
def test_update_health_monitor_missing_admin_state_up(self):
"""Test update health monitor with missing admin state field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertTrue(new_hm.get('admin_state_up'))
def test_update_health_monitor_missing_delay(self):
"""Test update health monitor with missing delay field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(hm.get('delay'), new_hm.get('delay'))
def test_update_health_monitor_missing_timeout(self):
"""Test update health monitor with missing timeout field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(hm.get('timeout'), new_hm.get('timeout'))
def test_update_health_monitor_missing_max_retries(self):
"""Test update health monitor with missing max retries field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(hm.get('max_retries'), new_hm.get('max_retries'))
def test_update_health_monitor_missing_http_method(self):
"""Test update health monitor with missing http_method field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(hm.get('http_method'), new_hm.get('http_method'))
def test_update_health_monitor_missing_url_path(self):
"""Test update health monitor with missing url_path field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(hm.get('url_path'), new_hm.get('url_path'))
def test_update_health_monitor_missing_expected_codes(self):
"""Test update health monitor with missing expected_codes field"""
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
new_hm = self._update_health_monitor(hm.get('id'))
self._update_health_monitor(hm.get('id'))
new_hm = self.health_monitors_client.get_health_monitor(hm.get('id'))
self.assertEqual(hm.get('expected_codes'),
new_hm.get('expected_codes'))

View File

@ -56,6 +56,7 @@ class ListenersTestJSON(base.BaseAdminTestCase):
def resource_cleanup(cls):
super(ListenersTestJSON, cls).resource_cleanup()
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_listener_empty_tenant_id(self):
"""Test create listener with an empty tenant id should fail"""
@ -69,6 +70,7 @@ class ListenersTestJSON(base.BaseAdminTestCase):
load_balancer_id=self.load_balancer_id,
listener_ids=[self.listener_id])
@decorators.skip_because(bug="1468457")
def test_create_listener_invalid_tenant_id(self):
"""Test create listener with an invalid tenant id"""
create_new_listener_kwargs = self.create_listener_kwargs
@ -85,6 +87,7 @@ class ListenersTestJSON(base.BaseAdminTestCase):
new_listener_id)
self.assertEqual(new_listener, listener)
@decorators.skip_because(bug="1468457")
@decorators.attr(type='smoke')
def test_create_listener_missing_tenant_id(self):
"""Test create listener with an missing tenant id.

View File

@ -60,14 +60,22 @@ class ListenersTestJSON(base.BaseTestCase):
"""Test get listener"""
listener = self.listeners_client.get_listener(
self.listener_id)
self._test_provisioning_status_if_exists(self.listener, listener)
self.assertEqual(self.listener, listener)
self._check_status_tree(load_balancer_id=self.load_balancer_id,
listener_ids=[self.listener_id])
def _prep_list_comparison(self, single, obj_list):
    """Align volatile fields of listed listeners with *single* for assertIn."""
    created_lacks_ts = not single.get('updated_at')
    for listed in obj_list:
        # Blank updated_at on the listed copy when the created object has
        # none, so the subsequent membership check compares stable data.
        if created_lacks_ts and listed.get('updated_at'):
            listed['updated_at'] = None
        self._test_provisioning_status_if_exists(single, listed)
def test_list_listeners(self):
"""Test get listeners with one listener"""
listeners = self.listeners_client.list_listeners()
self.assertEqual(len(listeners), 1)
self._prep_list_comparison(self.listener, listeners)
self.assertIn(self.listener, listeners)
self._check_status_tree(load_balancer_id=self.load_balancer_id,
listener_ids=[self.listener_id])
@ -86,6 +94,8 @@ class ListenersTestJSON(base.BaseTestCase):
listener_ids=[self.listener_id, new_listener_id])
listeners = self.listeners_client.list_listeners()
self.assertEqual(len(listeners), 2)
self._prep_list_comparison(self.listener, listeners)
self._prep_list_comparison(new_listener, listeners)
self.assertIn(self.listener, listeners)
self.assertIn(new_listener, listeners)
self.assertNotEqual(self.listener, new_listener)
@ -104,7 +114,9 @@ class ListenersTestJSON(base.BaseTestCase):
listener_ids=[self.listener_id, new_listener_id])
listener = self.listeners_client.get_listener(
new_listener_id)
self._test_provisioning_status_if_exists(new_listener, listener)
self.assertEqual(new_listener, listener)
self._test_provisioning_status_if_exists(self.listener, new_listener)
self.assertNotEqual(self.listener, new_listener)
@decorators.attr(type='negative')
@ -150,6 +162,7 @@ class ListenersTestJSON(base.BaseTestCase):
listener_ids=[self.listener_id, new_listener_id])
listener = self.listeners_client.get_listener(
new_listener_id)
self._test_provisioning_status_if_exists(new_listener, listener)
self.assertEqual(new_listener, listener)
self.assertTrue(new_listener['admin_state_up'])
@ -197,6 +210,7 @@ class ListenersTestJSON(base.BaseTestCase):
self._check_status_tree(load_balancer_id=self.load_balancer_id,
listener_ids=[self.listener_id])
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_listener_invalid_tenant_id(self):
"""Test create listener with an invalid tenant id"""
@ -291,6 +305,7 @@ class ListenersTestJSON(base.BaseTestCase):
self._check_status_tree(load_balancer_id=self.load_balancer_id,
listener_ids=[self.listener_id])
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_listener_empty_tenant_id(self):
"""Test create listener with an empty tenant id"""
@ -317,6 +332,7 @@ class ListenersTestJSON(base.BaseTestCase):
listener_ids=[self.listener_id, new_listener_id])
listener = self.listeners_client.get_listener(
new_listener_id)
self._test_provisioning_status_if_exists(new_listener, listener)
self.assertEqual(new_listener, listener)
def test_create_listener_empty_description(self):
@ -333,6 +349,7 @@ class ListenersTestJSON(base.BaseTestCase):
listener_ids=[self.listener_id, new_listener_id])
listener = self.listeners_client.get_listener(
new_listener_id)
self._test_provisioning_status_if_exists(new_listener, listener)
self.assertEqual(new_listener, listener)
@decorators.attr(type='negative')
@ -370,6 +387,7 @@ class ListenersTestJSON(base.BaseTestCase):
self.listener_id)
self.assertEqual(listener.get('name'), 'new_name')
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_update_listener_invalid_tenant_id(self):
"""Test update listener with an invalid tenant id"""
@ -487,6 +505,7 @@ class ListenersTestJSON(base.BaseTestCase):
self.assertEqual(listener.get('connection_limit'),
old_connection_limit)
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_update_listener_empty_tenant_id(self):
"""Test update listener with an empty tenant id"""
@ -549,7 +568,9 @@ class ListenersTestJSON(base.BaseTestCase):
listener_ids=[self.listener_id, new_listener_id])
listener = self.listeners_client.get_listener(
new_listener_id)
self._test_provisioning_status_if_exists(new_listener, listener)
self.assertEqual(new_listener, listener)
self._test_provisioning_status_if_exists(self.listener, new_listener)
self.assertNotEqual(self.listener, new_listener)
self._delete_listener(new_listener_id)
self.assertRaises(ex.NotFound,

View File

@ -15,12 +15,15 @@
import netaddr
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as ex
from neutron_lbaas.tests.tempest.v2.api import base
CONF = config.CONF
class LoadBalancersTestJSON(base.BaseTestCase):
@ -85,6 +88,8 @@ class LoadBalancersTestJSON(base.BaseTestCase):
self.addCleanup(self._delete_load_balancer, new_load_balancer_id)
load_balancer = self.load_balancers_client.get_load_balancer(
new_load_balancer_id)
# TODO(rm_work): This is a really dumb test. It's essentially comparing
# two back-to-back GETs to each other.
self.assertEqual(new_load_balancer, load_balancer)
self.assertNotEqual(self.load_balancer, new_load_balancer)
@ -275,11 +280,12 @@ class LoadBalancersTestJSON(base.BaseTestCase):
@decorators.attr(type='negative')
def test_create_load_balancer_invalid_flavor_field(self):
"""Test create load balancer with an invalid flavor field"""
self.assertRaises(ex.NotFound,
self.assertRaises(ex.ClientRestClientException,
self.load_balancers_client.create_load_balancer,
vip_subnet_id=self.subnet['id'],
flavor_id="NO_SUCH_FLAVOR")
@decorators.skip_because(bug="1655768")
@decorators.attr(type='negative')
def test_create_load_balancer_provider_flavor_conflict(self):
"""Test create load balancer with both a provider and a flavor"""
@ -287,7 +293,7 @@ class LoadBalancersTestJSON(base.BaseTestCase):
self.load_balancers_client.create_load_balancer,
vip_subnet_id=self.subnet['id'],
flavor_id="NO_SUCH_FLAVOR",
provider="NO_SUCH_PROVIDER")
provider="octavia")
@decorators.attr(type='smoke')
def test_update_load_balancer(self):
@ -398,7 +404,14 @@ class LoadBalancersTestJSON(base.BaseTestCase):
statuses = self.load_balancers_client.get_load_balancer_status_tree(
self.load_balancer_id)
load_balancer = statuses['loadbalancer']
self.assertEqual("ONLINE", load_balancer['operating_status'])
if CONF.lbaas.test_with_noop:
# Just make sure the status is valid, because operating_status is
# slightly unpredictable in NOOP mode.
self.assertIn(load_balancer['operating_status'],
['ONLINE', 'OFFLINE'])
else:
# With real drivers, we should definitely go to ONLINE
self.assertEqual("ONLINE", load_balancer['operating_status'])
self.assertEqual("ACTIVE", load_balancer['provisioning_status'])
self.assertEmpty(load_balancer['listeners'])

View File

@ -61,6 +61,7 @@ class MemberTestJSON(base.BaseAdminTestCase):
def resource_cleanup(cls):
super(MemberTestJSON, cls).resource_cleanup()
@decorators.skip_because(bug="1468457")
@decorators.attr(type='smoke')
def test_create_member_invalid_tenant_id(self):
"""Test create member with invalid tenant_id"""
@ -74,6 +75,7 @@ class MemberTestJSON(base.BaseAdminTestCase):
self.assertEqual(member['subnet_id'], self.subnet_id)
self.assertEqual(member['tenant_id'], "$232!$pw")
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_member_empty_tenant_id(self):
"""Test create member with an empty tenant_id should fail"""

View File

@ -149,6 +149,7 @@ class MemberTestJSON(base.BaseTestCase):
self.assertRaises(ex.BadRequest, self._create_member,
self.pool_id, **member_opts)
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_member_missing_required_field_subnet_id(self):
"""Test create a member with missing field subnet_id """
@ -165,6 +166,7 @@ class MemberTestJSON(base.BaseTestCase):
self.assertRaises(ex.BadRequest, self._create_member,
self.pool_id, **member_opts)
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_member_invalid_tenant_id(self):
"""Test create member with invalid tenant_id"""
@ -228,6 +230,7 @@ class MemberTestJSON(base.BaseTestCase):
self.assertRaises(ex.BadRequest, self._create_member,
self.pool_id, **member_opts)
@decorators.skip_because(bug="1468457") # Octavia does a floor()
@decorators.attr(type='negative')
def test_create_member_nonint_weight(self):
"""Test create member with nonint weight"""
@ -239,6 +242,7 @@ class MemberTestJSON(base.BaseTestCase):
self.assertRaises(ex.BadRequest, self._create_member,
self.pool_id, **member_opts)
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_member_empty_tenant_id(self):
"""Test create member with an empty tenant_id"""
@ -327,8 +331,9 @@ class MemberTestJSON(base.BaseTestCase):
self.assertEqual(1, member["weight"])
# Lets overwrite the defaults
member_opts = {"weight": 10, "admin_state_up": False}
member = self._update_member(self.pool_id, member_id,
**member_opts)
self._update_member(self.pool_id, member_id,
**member_opts)
member = self.members_client.get_member(self.pool_id, member_id)
# And make sure they stick
self.assertFalse(member["admin_state_up"])
self.assertEqual(10, member["weight"])
@ -336,15 +341,14 @@ class MemberTestJSON(base.BaseTestCase):
def test_update_member_missing_admin_state_up(self):
"""Test that we can update a member with missing admin_state_up."""
member_opts = self.build_member_opts()
member = self._create_member(self.pool_id,
**member_opts)
member = self._create_member(self.pool_id, **member_opts)
member_id = member["id"]
self.addCleanup(self._delete_member, self.pool_id, member_id)
self.assertTrue(member["admin_state_up"])
self.assertEqual(1, member["weight"])
member_opts = {"weight": 10}
member = self._update_member(self.pool_id, member_id,
**member_opts)
self._update_member(self.pool_id, member_id, **member_opts)
member = self.members_client.get_member(self.pool_id, member_id)
self.assertTrue(member["admin_state_up"])
self.assertEqual(10, member["weight"])
@ -358,8 +362,8 @@ class MemberTestJSON(base.BaseTestCase):
self.assertTrue(member["admin_state_up"])
self.assertEqual(1, member["weight"])
member_opts = {"admin_state_up": False}
member = self._update_member(self.pool_id, member_id,
**member_opts)
self._update_member(self.pool_id, member_id, **member_opts)
member = self.members_client.get_member(self.pool_id, member_id)
self.assertFalse(member["admin_state_up"])
self.assertEqual(1, member["weight"])

View File

@ -76,6 +76,7 @@ class TestPools(base.BaseAdminTestCase):
self.addCleanup(self._delete_pool, response['id'])
return response
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_pool_using_empty_tenant_field(self):
"""Test create pool with empty tenant field should fail"""
@ -124,8 +125,9 @@ class TestPools(base.BaseAdminTestCase):
new_pool = self._prepare_and_create_pool()
session_persistence = {"type": "APP_COOKIE",
"cookie_name": "my_cookie"}
pool = self._update_pool(new_pool.get('id'),
session_persistence=session_persistence)
self._update_pool(new_pool.get('id'),
session_persistence=session_persistence)
pool = self.pools_client.get_pool(new_pool.get('id'))
self.assertEqual(session_persistence, pool.get('session_persistence'))
def test_update_pool_sesssion_persistence_app_to_http(self):
@ -136,11 +138,13 @@ class TestPools(base.BaseAdminTestCase):
new_pool = self._prepare_and_create_pool()
session_persistence = {"type": "APP_COOKIE",
"cookie_name": "my_cookie"}
pool = self._update_pool(new_pool.get('id'),
session_persistence=session_persistence)
self._update_pool(new_pool.get('id'),
session_persistence=session_persistence)
pool = self.pools_client.get_pool(new_pool.get('id'))
self.assertEqual(session_persistence, pool.get('session_persistence'))
pool = self._update_pool(new_pool.get('id'),
session_persistence={"type": "HTTP_COOKIE"})
self._update_pool(new_pool.get('id'),
session_persistence={"type": "HTTP_COOKIE"})
pool = self.pools_client.get_pool(new_pool.get('id'))
session_persistence = {"type": "HTTP_COOKIE",
"cookie_name": None}
self.assertEqual(session_persistence, pool.get('session_persistence'))
@ -150,6 +154,7 @@ class TestPools(base.BaseAdminTestCase):
"""Test delete admin pool"""
new_pool = self._prepare_and_create_pool(cleanup=False)
pool = self.pools_client.get_pool(new_pool.get('id'))
self._test_provisioning_status_if_exists(new_pool, pool)
self.assertEqual(new_pool, pool)
self._delete_pool(new_pool.get('id'))
self.assertRaises(ex.NotFound, self.pools_client.get_pool,

View File

@ -80,6 +80,12 @@ class TestPools(base.BaseTestCase):
self.addCleanup(self._delete_pool, response['id'])
return response
def _prep_list_comparison(self, single, obj_list):
    """Normalize list entries so a created pool matches its listed copy."""
    for entry in obj_list:
        has_ts = entry.get('updated_at')
        # Only the listed copy may carry a timestamp right after creation;
        # drop it so dict equality is not disturbed by the race.
        if has_ts and not single.get('updated_at'):
            entry['updated_at'] = None
        self._test_provisioning_status_if_exists(single, entry)
def test_list_pools_empty(self):
"""Test get pools when empty"""
pools = self.pools_client.list_pools()
@ -88,9 +94,9 @@ class TestPools(base.BaseTestCase):
def test_list_pools_one(self):
"""Test get pools with one pool"""
new_pool = self._prepare_and_create_pool()
new_pool = self.pools_client.get_pool(new_pool['id'])
pools = self.pools_client.list_pools()
self.assertEqual(1, len(pools))
self._prep_list_comparison(new_pool, pools)
self.assertIn(new_pool, pools)
@decorators.attr(type='smoke')
@ -100,21 +106,17 @@ class TestPools(base.BaseTestCase):
new_pool2 = self._prepare_and_create_pool()
pools = self.pools_client.list_pools()
self.assertEqual(2, len(pools))
self._prep_list_comparison(new_pool1, pools)
self._prep_list_comparison(new_pool2, pools)
self.assertIn(new_pool1, pools)
self.assertIn(new_pool2, pools)
@decorators.attr(type='smoke')
def test_get_pool(self):
"""Test get pool"""
new_pool = self._prepare_and_create_pool()
pool = self.pools_client.get_pool(new_pool.get('id'))
self.assertEqual(new_pool, pool)
@decorators.attr(type='smoke')
def test_create_pool(self):
def test_create_and_get_pool(self):
"""Test create pool"""
new_pool = self._prepare_and_create_pool()
pool = self.pools_client.get_pool(new_pool.get('id'))
self._test_provisioning_status_if_exists(new_pool, pool)
self.assertEqual(new_pool, pool)
@decorators.attr(type='negative')
@ -247,6 +249,7 @@ class TestPools(base.BaseTestCase):
protocol=self.pool_protocol,
listener_id="$@5$%$7863")
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_pool_invalid_tenant_id_field(self):
"""Test create pool with invalid tenant_id field"""
@ -304,6 +307,7 @@ class TestPools(base.BaseTestCase):
lb_algorithm='ROUND_ROBIN',
listener_id=self.listener['id'])
@decorators.skip_because(bug="1765796")
@decorators.attr(type='negative')
def test_create_pool_empty_session_persistence_field(self):
"""Test create pool with empty session persistence field"""
@ -329,6 +333,7 @@ class TestPools(base.BaseTestCase):
admin_state_up="",
lb_algorithm='ROUND_ROBIN')
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_pool_empty_tenant_field(self):
"""Test create pool with empty tenant field"""
@ -338,6 +343,7 @@ class TestPools(base.BaseTestCase):
lb_algorithm='ROUND_ROBIN',
listener_id=self.listener['id'])
@decorators.skip_because(bug="1468457")
@decorators.attr(type='negative')
def test_create_pool_for_other_tenant_field(self):
"""Test create pool for other tenant field"""
@ -389,6 +395,7 @@ class TestPools(base.BaseTestCase):
new_pool = self._prepare_and_create_pool(
session_persistence={'type': 'HTTP_COOKIE'})
pool = self.pools_client.get_pool(new_pool.get('id'))
self._test_provisioning_status_if_exists(new_pool, pool)
self.assertEqual(new_pool, pool)
def test_create_pool_with_session_persistence_app_cookie(self):
@ -397,6 +404,7 @@ class TestPools(base.BaseTestCase):
session_persistence={'type': 'APP_COOKIE',
'cookie_name': 'sessionId'})
pool = self.pools_client.get_pool(new_pool.get('id'))
self._test_provisioning_status_if_exists(new_pool, pool)
self.assertEqual(new_pool, pool)
@decorators.attr(type='negative')
@ -427,9 +435,8 @@ class TestPools(base.BaseTestCase):
"""Test update pool"""
new_pool = self._prepare_and_create_pool()
desc = 'testing update with new description'
pool = self._update_pool(new_pool.get('id'),
description=desc,
wait=True)
self._update_pool(new_pool.get('id'), description=desc, wait=True)
pool = self.pools_client.get_pool(new_pool.get('id'))
self.assertEqual(desc, pool.get('description'))
def test_update_pool_missing_name(self):
@ -519,6 +526,7 @@ class TestPools(base.BaseTestCase):
self.assertRaises(ex.BadRequest, self.pools_client.update_pool,
new_pool.get('id'), admin_state_up="")
@decorators.skip_because(bug="1765796")
@decorators.attr(type='negative')
def test_update_pool_empty_session_persistence(self):
"""Test update pool with empty session persistence field"""
@ -546,6 +554,7 @@ class TestPools(base.BaseTestCase):
"""Test delete pool"""
new_pool = self._prepare_and_create_pool(cleanup=False)
pool = self.pools_client.get_pool(new_pool.get('id'))
self._test_provisioning_status_if_exists(new_pool, pool)
self.assertEqual(new_pool, pool)
self._delete_pool(new_pool.get('id'))
self.assertRaises(ex.NotFound, self.pools_client.get_pool,
@ -555,6 +564,7 @@ class TestPools(base.BaseTestCase):
"""Test delete pool that doesn't exist"""
new_pool = self._prepare_and_create_pool(cleanup=False)
pool = self.pools_client.get_pool(new_pool.get('id'))
self._test_provisioning_status_if_exists(new_pool, pool)
self.assertEqual(new_pool, pool)
self._delete_pool(new_pool.get('id'))
self.assertRaises(ex.NotFound, self._delete_pool,

View File

@ -1,229 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest import test
import testscenarios
from neutron_lbaas.tests.tempest.v2.api import base
CONF = config.CONF
# Use local tempest conf if one is available.
# This usually means we're running tests outside of devstack
if os.path.exists('./tests/tempest/etc/dev_tempest.conf'):
CONF.set_config_path('./tests/tempest/etc/dev_tempest.conf')
class AdminStateTests(testscenarios.TestWithScenarios,
                      base.BaseTestCase):
    """Scenario tests (admin_state_up tests).

    Supplies the resource set-up helpers and the operating-status check
    methods shared by the admin_state_up data-driven tests.

    Changes from the original: removed the no-op ``resource_cleanup``
    override, replaced redundant ``bool(x)`` truthiness tests with plain
    truthiness, and flattened the nested conditionals in
    ``check_operating_status`` into guard clauses. Behavior is unchanged.
    """

    @classmethod
    def resource_setup(cls):
        """Create the shared network/subnet and default resource attributes."""
        super(AdminStateTests, cls).resource_setup()
        if not test.is_extension_enabled("lbaasv2", "network"):
            msg = "lbaas extension not enabled."
            raise cls.skipException(msg)
        network_name = data_utils.rand_name('network-')
        cls.network = cls.create_network(network_name)
        cls.subnet = cls.create_subnet(cls.network)
        cls.tenant_id = cls.subnet.get('tenant_id')
        cls.subnet_id = cls.subnet.get('id')
        cls.protocol = 'HTTP'
        cls.port = 8081
        cls.lb_algorithm = 'ROUND_ROBIN'
        cls.address = '127.0.0.1'

    @classmethod
    def resource_setup_load_balancer(cls, admin_state_up_flag):
        """Create an ACTIVE load balancer with the given admin_state_up."""
        cls.create_lb_kwargs = {'tenant_id': cls.tenant_id,
                                'vip_subnet_id': cls.subnet_id,
                                'admin_state_up': admin_state_up_flag}
        cls.load_balancer = cls._create_active_load_balancer(
            **cls.create_lb_kwargs)
        cls.load_balancer_id = cls.load_balancer['id']

    @classmethod
    def resource_setup_listener(cls, admin_state_up_flag):
        """Create a listener on the class load balancer."""
        cls.create_listener_kwargs = {'loadbalancer_id': cls.load_balancer_id,
                                      'protocol': cls.protocol,
                                      'protocol_port': cls.port,
                                      'admin_state_up': admin_state_up_flag
                                      }
        cls.listener = cls._create_listener(
            **cls.create_listener_kwargs)
        cls.listener_id = cls.listener['id']

    @classmethod
    def resource_setup_pool(cls, admin_state_up_flag):
        """Create a pool behind the class listener."""
        cls.create_pool_kwargs = {'protocol': cls.protocol,
                                  'lb_algorithm': cls.lb_algorithm,
                                  'listener_id': cls.listener_id,
                                  'admin_state_up': admin_state_up_flag
                                  }
        cls.pool = cls._create_pool(
            **cls.create_pool_kwargs)
        cls.pool_id = cls.pool['id']

    @classmethod
    def resource_setup_member(cls, admin_state_up_flag):
        """Create a member in the class pool."""
        cls.create_member_kwargs = {'address': cls.address,
                                    'protocol_port': cls.port,
                                    'subnet_id': cls.subnet_id,
                                    'admin_state_up': admin_state_up_flag}
        cls.member = cls._create_member(
            cls.pool_id, **cls.create_member_kwargs)
        cls.member_id = cls.member['id']

    @classmethod
    def resource_set_health_monitor(cls, admin_state_up_flag, creator):
        """Create a health monitor via *creator* (e.g. _create_health_monitor).

        cleanup=False because callers register their own addCleanup.
        """
        cls.create_hm_kwargs = {'type': cls.protocol,
                                'delay': 3,
                                'max_retries': 10,
                                'timeout': 5,
                                'pool_id': cls.pool_id,
                                'admin_state_up': admin_state_up_flag,
                                'cleanup': False}
        cls.health_monitor = creator(**cls.create_hm_kwargs)
        cls.health_monitor_id = cls.health_monitor['id']

    def check_lb_operating_status(self,
                                  load_balancer,
                                  listeners=None,
                                  pools=None,
                                  members=None):
        """Assert the LB's operating_status for its admin_state_up.

        Returns True when the LB is administratively up (and must be
        ONLINE); otherwise asserts the LB and all its children are
        DISABLED and returns False.
        """
        if load_balancer and self.load_balancer.get('admin_state_up'):
            self.assertEqual(
                load_balancer.get('operating_status'), 'ONLINE')
            return True
        if load_balancer:
            # Administratively down: the whole subtree is DISABLED.
            self.assertEqual(
                load_balancer.get('operating_status'), 'DISABLED')
            if listeners:
                self.assertEqual(
                    listeners[0].get('operating_status'), 'DISABLED')
            if pools:
                self.assertEqual(
                    pools[0].get('operating_status'), 'DISABLED')
            if members:
                self.assertEqual(
                    members[0].get('operating_status'), 'DISABLED')
        return False

    def check_listener_operating_status(self,
                                        listeners,
                                        pools=None,
                                        members=None):
        """Assert the listener's operating_status; True when it is ONLINE."""
        if listeners and self.listener.get('admin_state_up'):
            self.assertEqual(
                listeners[0].get('operating_status'), 'ONLINE')
            return True
        if listeners:
            self.assertEqual(
                listeners[0].get('operating_status'), 'DISABLED')
            if pools:
                self.assertEqual(
                    pools[0].get('operating_status'), 'DISABLED')
            if members:
                self.assertEqual(
                    members[0].get('operating_status'), 'DISABLED')
        return False

    def check_pool_operating_status(self,
                                    pools,
                                    members=None):
        """Assert the pool's operating_status; True when it is ONLINE."""
        if pools and self.pool.get('admin_state_up'):
            self.assertEqual(
                pools[0].get('operating_status'), 'ONLINE')
            return True
        if pools:
            self.assertEqual(
                pools[0].get('operating_status'), 'DISABLED')
            if members:
                self.assertEqual(
                    members[0].get('operating_status'), 'DISABLED')
        return False

    def check_member_operating_status(self, members):
        """Assert the member's operating_status; True when it is ONLINE."""
        if members and self.member.get('admin_state_up'):
            self.assertEqual(
                members[0].get('operating_status'), 'ONLINE')
            return True
        if members:
            self.assertEqual(
                members[0].get('operating_status'), 'DISABLED')
        return False

    def check_health_monitor_provisioning_status(self, health_monitor):
        """Assert the monitor's provisioning_status; True when ACTIVE."""
        if health_monitor and self.health_monitor.get('admin_state_up'):
            self.assertEqual(health_monitor.get('provisioning_status'),
                             'ACTIVE')
            return True
        if health_monitor:
            self.assertEqual(health_monitor.get('provisioning_status'),
                             'DISABLED')
        return False

    def check_operating_status(self):
        """Walk the LB status tree and verify each level's status.

        Child levels are only checked while every ancestor is
        administratively up (each check_* returns True), mirroring the
        original nested-if traversal.
        """
        statuses = (self.load_balancers_client.
                    get_load_balancer_status_tree
                    (self.load_balancer_id))
        load_balancer = statuses['loadbalancer']
        listeners = load_balancer['listeners']
        pools = None
        members = None
        health_monitor = None
        if listeners:
            pools = listeners[0]['pools']
        if pools:
            members = pools[0]['members']
            health_monitor = pools[0]['healthmonitor']
        if not self.check_lb_operating_status(load_balancer,
                                              listeners,
                                              pools,
                                              members):
            return
        if not self.check_listener_operating_status(listeners,
                                                    pools,
                                                    members):
            return
        if self.check_pool_operating_status(pools, members):
            self.check_member_operating_status(members)
            self.check_health_monitor_provisioning_status(health_monitor)

View File

@ -1,145 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import decorators
import testscenarios
from neutron_lbaas.tests.tempest.v2.ddt import base_ddt
CONF = config.CONF
scenario_lb_T = ('lb_T', {'lb_flag': True})
scenario_lb_F = ('lb_F', {'lb_flag': False})
scenario_listener_T = ('listener_T', {'listener_flag': True})
scenario_listener_F = ('listener_F', {'listener_flag': False})
scenario_pool_T = ('pool_T', {'pool_flag': True})
scenario_pool_F = ('pool_F', {'pool_flag': False})
scenario_healthmonitor_T = ('healthmonitor_T', {'healthmonitor_flag': True})
scenario_healthmonitor_F = ('healthmonitor_F', {'healthmonitor_flag': False})
scenario_healthmonitor_to_flag_T = ('healthmonitor_to_flag_T', {
'healthmonitor_to_flag': True})
scenario_healthmonitor_to_flag_F = ('healthmonitor_to_flag_F', {
'healthmonitor_to_flag': False})
# The following command creates 16 unique scenarios
scenario_create_health_monitor = testscenarios.multiply_scenarios(
[scenario_lb_T, scenario_lb_F],
[scenario_listener_T, scenario_listener_F],
[scenario_pool_T, scenario_pool_F],
[scenario_healthmonitor_T, scenario_healthmonitor_F])
# The following command creates 32 unique scenarios
scenario_update_health_monitor = testscenarios.multiply_scenarios(
[scenario_healthmonitor_to_flag_T, scenario_healthmonitor_to_flag_F],
scenario_create_health_monitor)
class BaseHealthMonitorAdminStateTest(base_ddt.AdminStateTests):
    """Shared setUp for the health-monitor admin_state_up scenario tests.

    Fix: removed the ``resource_setup``/``resource_cleanup`` overrides and
    the three classmethod overrides that did nothing but call super() —
    dead pass-through code with no behavioral effect.
    """

    def setUp(self):
        """Create load balancer, listener, pool and health monitor.

        Each resource's admin_state_up flag comes from the active
        testscenarios scenario; cleanups are registered in reverse order.
        """
        super(BaseHealthMonitorAdminStateTest, self).setUp()
        self.resource_setup_load_balancer(self.lb_flag)
        self.addCleanup(self._delete_load_balancer, self.load_balancer_id)
        self.resource_setup_listener(self.listener_flag)
        self.addCleanup(self._delete_listener, self.listener_id)
        self.resource_setup_pool(self.pool_flag)
        self.addCleanup(self._delete_pool, self.pool_id)
        self.resource_set_health_monitor(self.healthmonitor_flag,
                                         self._create_health_monitor)
        self.addCleanup(self._delete_health_monitor, self.health_monitor_id)
class CreateHealthMonitorAdminStateTest(BaseHealthMonitorAdminStateTest):
    """Health-monitor creation admin_state_up scenarios.

    Tests the Neutron-LBaaS API via the REST client for health monitors
    with testscenarios: exercise every admin_state_up boolean combination
    (16 scenarios) and verify the expected operating_status and
    provisioning_status results from the status tree.

    Fixes: the description was a bare string AFTER ``scenarios = ...``
    (a no-op expression, not a class docstring) — moved here; removed a
    commented-out skip decorator (dead code).
    """

    scenarios = scenario_create_health_monitor

    def test_create_health_monitor_with_scenarios(self):
        """Compare the status tree against the scenario's flag combination."""
        self.check_operating_status()
class UpdateHealthMonitorAdminStateTest(BaseHealthMonitorAdminStateTest):
    """Health-monitor update admin_state_up scenarios.

    Tests the Neutron-LBaaS API via the REST client for health monitors
    with testscenarios: exercise every admin_state_up boolean combination
    (32 scenarios) and verify the expected operating_status and
    provisioning_status results from the status tree.

    Fix: the description was a bare string AFTER ``scenarios = ...``
    (a no-op expression, not a class docstring) — moved here; renamed the
    misleading ``create_health_monitor_kwargs`` attribute, which holds
    UPDATE kwargs and is only read in this method.
    """

    scenarios = scenario_update_health_monitor

    @decorators.skip_because(bug="1449775")
    def test_update_health_monitor_with_admin_state_up(self):
        """Update the monitor's admin_state_up and re-verify the status tree."""
        self.update_health_monitor_kwargs = {
            'admin_state_up': self.healthmonitor_to_flag}
        self.health_monitor = self._update_health_monitor(
            self.health_monitor_id, **self.update_health_monitor_kwargs)
        self.check_operating_status()

View File

@ -1,132 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
import testscenarios
from neutron_lbaas.tests.tempest.v2.ddt import base_ddt
CONF = config.CONF
"""
Tests the following operations in the Neutron-LBaaS API using the
REST client for Listeners:
|-----|------------------|------------------|-------------------------|
|S.No |Action |LB admin_state_up | Listener admin_state_up |
|-----|------------------|------------------|-------------------------|
| 1 | Create Listener | True | True |
| 2 | | True | False |
| 3 | | False | True |
| 4 | | False | False |
| 5 | Update Listener | True | True --> True |
| 6 | | True | True --> False |
| 7 | | True | False --> True |
| 8 | | True | False --> False |
| 9 | | False | True --> True |
| 10 | | False | True --> False |
| 11 | | False | False --> True |
| 12 | | False | False --> False |
|-----|------------------|------------------|-------------------------|
"""
# set up the scenarios
scenario_lb_T = ('lb_T', {'lb_flag': True})
scenario_lb_F = ('lb_F', {'lb_flag': False})
scenario_listener_T = ('listener_T', {'listener_flag': True})
scenario_listener_F = ('listener_F', {'listener_flag': False})
scenario_lis_to_flag_T = ('listener_to_flag_T', {'listener_to_flag': True})
scenario_lis_to_flag_F = ('listener_to_flag_F', {'listener_to_flag': False})
# The following command creates 4 unique scenarios
scenario_create_member = testscenarios.multiply_scenarios(
[scenario_lb_T, scenario_lb_F],
[scenario_listener_T, scenario_listener_F])
# The following command creates 8 unique scenarios
scenario_update_member = testscenarios.multiply_scenarios(
[scenario_lis_to_flag_T, scenario_lis_to_flag_F],
scenario_create_member)
class CreateListenerAdminStateTests(base_ddt.AdminStateTests):
    """Listener-creation admin_state_up scenarios (4 combinations).

    Fix: removed the ``resource_setup``/``resource_cleanup`` overrides
    that only called super() (dead code) and the ``setup_load_balancer``
    override — the base class defines ``resource_setup_load_balancer``,
    not ``setup_load_balancer``, so that super() call would raise
    AttributeError if ever invoked (NOTE(review): confirm no external
    caller relied on it).
    """

    scenarios = scenario_create_member

    def test_create_listener_with_lb_and_listener_admin_states_up(self):
        """Create a listener.

        Create a listener with various combinations of values for the
        admin_state_up field of the listener and the load balancer, then
        verify the resulting status tree.
        """
        self.resource_setup_load_balancer(self.lb_flag)
        self.resource_setup_listener(self.listener_flag)
        self.check_operating_status()
        self._delete_listener(self.listener_id)
        self._delete_load_balancer(self.load_balancer_id)
class UpdateListenerAdminStateTests(base_ddt.AdminStateTests):
    """Listener-update admin_state_up scenarios (8 combinations).

    Fix: removed the ``resource_setup``/``resource_cleanup`` overrides
    that only called super() (dead code) and the ``setup_load_balancer``
    override, whose super() call targets a method the base class does not
    define (NOTE(review): confirm no external caller relied on it).
    """

    scenarios = scenario_update_member

    def test_update_listener_with_listener_admin_state_up(self):
        """Update a listener.

        Update a listener with various combinations of admin_state_up
        on the listener and the load balancer, checking the status tree
        both before and after the update.
        """
        self.resource_setup_load_balancer(self.lb_flag)
        self.resource_setup_listener(self.listener_flag)
        self.check_operating_status()
        self.listener = (self._update_listener(
            self.listener_id,
            name='new_name',
            admin_state_up=self.listener_to_flag))
        self.check_operating_status()
        self._delete_listener(self.listener_id)
        self._delete_load_balancer(self.load_balancer_id)

View File

@ -1,167 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
import testscenarios
from neutron_lbaas.tests.tempest.v2.ddt import base_ddt
CONF = config.CONF
"""
Tests the following operations in the Neutron-LBaaS API using the
REST client with various combinations of values for the
admin_state_up field of lb, listener, pool and member.
create member
update member
"""
# set up the scenarios
scenario_lb_T = ('lb_T', {'lb_flag': True})
scenario_lb_F = ('lb_F', {'lb_flag': False})
scenario_listener_T = ('listener_T', {'listener_flag': True})
scenario_listener_F = ('listener_F', {'listener_flag': False})
scenario_pool_T = ('pool_T', {'pool_flag': True})
scenario_pool_F = ('pool_F', {'pool_flag': False})
scenario_member_T = ('member_T', {'member_flag': True})
scenario_member_F = ('member_F', {'member_flag': False})
scenario_mem_to_flag_T = ('member_to_flag_T', {'member_to_flag': True})
scenario_mem_to_flag_F = ('member_to_flag_F', {'member_to_flag': False})
# The following command creates 16 unique scenarios
scenario_create_member = testscenarios.multiply_scenarios(
[scenario_lb_T, scenario_lb_F],
[scenario_listener_T, scenario_listener_F],
[scenario_pool_T, scenario_pool_F],
[scenario_member_T, scenario_member_F])
# The following command creates 32 unique scenarios
scenario_update_member = testscenarios.multiply_scenarios(
[scenario_mem_to_flag_T, scenario_mem_to_flag_F],
scenario_create_member)
class CreateMemberAdminStateTests(base_ddt.AdminStateTests):
    """Member-creation admin_state_up scenarios (16 combinations).

    Fix: removed the ``resource_setup``/``resource_cleanup`` overrides
    and the four resource_setup_* classmethod overrides that did nothing
    but call super() — dead pass-through code.
    """

    scenarios = scenario_create_member

    def setUp(self):
        """Set up load balancer, listener, pool and member."""
        super(CreateMemberAdminStateTests, self).setUp()
        self.resource_setup_load_balancer(self.lb_flag)
        self.addCleanup(self._delete_load_balancer, self.load_balancer_id)
        self.resource_setup_listener(self.listener_flag)
        self.addCleanup(self._delete_listener, self.listener_id)
        self.resource_setup_pool(self.pool_flag)
        self.addCleanup(self._delete_pool, self.pool_id)
        self.resource_setup_member(self.member_flag)
        self.addCleanup(self._delete_member, self.pool_id, self.member_id)

    def test_create_member_with_admin_state_up(self):
        """Verify the status tree for the scenario's flag combination."""
        self.check_operating_status()
class UpdateMemberAdminStateTests(base_ddt.AdminStateTests):
    """Member-update admin_state_up scenarios (32 combinations).

    Fixes: removed the ``resource_setup``/``resource_cleanup`` overrides
    and the four resource_setup_* classmethod overrides that only called
    super() (dead code); renamed the misleading ``create_member_kwargs``
    attribute, which holds UPDATE kwargs and is only read in this method.
    """

    scenarios = scenario_update_member

    def setUp(self):
        """Set up load balancer, listener, pool and member resources."""
        super(UpdateMemberAdminStateTests, self).setUp()
        self.resource_setup_load_balancer(self.lb_flag)
        self.addCleanup(self._delete_load_balancer, self.load_balancer_id)
        self.resource_setup_listener(self.listener_flag)
        self.addCleanup(self._delete_listener, self.listener_id)
        self.resource_setup_pool(self.pool_flag)
        self.addCleanup(self._delete_pool, self.pool_id)
        self.resource_setup_member(self.member_flag)
        self.addCleanup(self._delete_member, self.pool_id, self.member_id)

    def test_update_member_with_admin_state_up(self):
        """Update a member's admin_state_up and verify the status tree."""
        self.update_member_kwargs = {'admin_state_up': self.member_to_flag}
        self.member = self._update_member(self.pool_id,
                                          self.member_id,
                                          **self.update_member_kwargs)
        self.check_operating_status()

View File

@ -0,0 +1,15 @@
# Zuul legacy post-run playbook: pull the job's log artifacts from the
# test node back to the executor's log root so they are published.
- hosts: primary
  tasks:
    # rsync only the logs/ subtree; prune everything else.
    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs

View File

@ -0,0 +1,60 @@
# Zuul legacy run playbook for the neutron-lbaasv2 API job gating the
# direct L7-proxy to Octavia ("proxy_octavia") configuration.
# NOTE(review): structure auto-converted from the old JJB job; the shell
# steps below are opaque block scalars and are reproduced verbatim.
- hosts: all
  name: Autoconverted job legacy-neutron-lbaasv2-dsvm-api from old job gate-neutron-lbaasv2-dsvm-api-ubuntu-xenial
  tasks:

    - name: Ensure legacy workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    # Clone devstack-gate into the workspace via zuul-cloner.
    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack-infra/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              git://git.openstack.org \
              openstack-infra/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    # Run the devstack-gate wrapper with the lbaasv2 proxy_octavia hooks.
    - shell:
        cmd: |
          set -e
          set -x
          export PYTHONUNBUFFERED=true
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_TEMPEST_NOTESTS=1
          export DEVSTACK_GATE_EXERCISES=0
          export DEVSTACK_GATE_NEUTRON=1
          export DEVSTACK_GATE_INSTALL_TESTONLY=1
          export BRANCH_OVERRIDE=default
          if [ "$BRANCH_OVERRIDE" != "default" ] ; then
              export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
          fi
          export PROJECTS="openstack/barbican $PROJECTS"
          export PROJECTS="openstack/python-barbicanclient $PROJECTS"
          export PROJECTS="openstack/diskimage-builder $PROJECTS"
          export PROJECTS="openstack/neutron-lbaas $PROJECTS"
          export PROJECTS="openstack/octavia $PROJECTS"
          function gate_hook {
              $BASE/new/neutron-lbaas/neutron_lbaas/tests/contrib/gate_hook.sh tempest lbaasv2 proxy_octavia
          }
          export -f gate_hook
          function post_test_hook {
              $BASE/new/neutron-lbaas/neutron_lbaas/tests/contrib/post_test_hook.sh tempest lbaasv2 proxy_octavia
          }
          export -f post_test_hook
          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

View File

@ -64,3 +64,9 @@
parent: nlbaas-legacy-dsvm-base
run: playbooks/legacy/neutron-lbaasv2-dsvm-py3x-scenario-namespace/run.yaml
post-run: playbooks/legacy/neutron-lbaasv2-dsvm-py3x-scenario-namespace/post.yaml
- job:
name: neutron-lbaasv2-dsvm-api-l7octavia
parent: nlbaas-legacy-dsvm-base
run: playbooks/legacy/neutron-lbaasv2-dsvm-api-l7octavia/run.yaml
post-run: playbooks/legacy/neutron-lbaasv2-dsvm-api-l7octavia/post.yaml

View File

@ -14,6 +14,9 @@
- neutron-lbaasv2-dsvm-scenario-namespace
- neutron-lbaasv2-dsvm-py3x-scenario-namespace:
branches: ^(?!stable/ocata).*$
- neutron-lbaasv2-dsvm-api-l7octavia:
voting: false
gate:
queue: neutron-lbaas
jobs: