diff --git a/etc/octavia.conf b/etc/octavia.conf index 493b027bfe..dcc55ccb66 100644 --- a/etc/octavia.conf +++ b/etc/octavia.conf @@ -51,6 +51,9 @@ # Default provider driver # default_provider_driver = amphora +# The minimum health monitor delay interval for UDP-CONNECT Health Monitor type +# udp_connect_min_interval_health_monitor = 3 + [database] # This line MUST be changed to actually run the plugin. # Example: diff --git a/octavia/api/v2/controllers/health_monitor.py b/octavia/api/v2/controllers/health_monitor.py index 72d6b9d689..f6c09355a0 100644 --- a/octavia/api/v2/controllers/health_monitor.py +++ b/octavia/api/v2/controllers/health_monitor.py @@ -31,6 +31,7 @@ from octavia.common import data_models from octavia.common import exceptions from octavia.db import api as db_api from octavia.db import prepare as db_prepare +from octavia.i18n import _ CONF = cfg.CONF @@ -148,6 +149,34 @@ class HealthMonitorController(base.BaseController): # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') + def _validate_healthmonitor_request_for_udp(self, request): + invalid_fields = (request.http_method or request.url_path or + request.expected_codes) + is_invalid = (hasattr(request, 'type') and + (request.type != consts.HEALTH_MONITOR_UDP_CONNECT or + invalid_fields)) + if is_invalid: + raise exceptions.ValidationException(detail=_( + "The associated pool protocol is %(pool_protocol)s, so only " + "a %(type)s health monitor is supported.") % { + 'pool_protocol': consts.PROTOCOL_UDP, + 'type': consts.HEALTH_MONITOR_UDP_CONNECT}) + # if the logic arrives here, that means the validation of request above + # is OK. type is UDP-CONNECT, then here we check the healthmonitor + # delay value is matched. + if request.delay: + conf_set = (CONF.api_settings. 
+ udp_connect_min_interval_health_monitor) + if conf_set < 0: + return + elif request.delay < conf_set: + raise exceptions.ValidationException(detail=_( + "The request delay value %(delay)s should be larger than " + "%(conf_set)s for %(type)s health monitor type.") % { + 'delay': request.delay, + 'conf_set': conf_set, + 'type': consts.HEALTH_MONITOR_UDP_CONNECT}) + @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, body=hm_types.HealthMonitorRootPOST, status_code=201) def post(self, health_monitor_): @@ -165,6 +194,15 @@ class HealthMonitorController(base.BaseController): health_monitor.project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) + if pool.protocol == consts.PROTOCOL_UDP: + self._validate_healthmonitor_request_for_udp(health_monitor) + else: + if health_monitor.type == consts.HEALTH_MONITOR_UDP_CONNECT: + raise exceptions.ValidationException(detail=_( + "The %(type)s type is only supported for pools of type " + "%(protocol)s.") % {'type': health_monitor.type, + 'protocol': consts.PROTOCOL_UDP}) + self._auth_validate_action(context, health_monitor.project_id, consts.RBAC_POST) @@ -255,7 +293,10 @@ class HealthMonitorController(base.BaseController): self._auth_validate_action(context, project_id, consts.RBAC_PUT) self._validate_update_hm(db_hm, health_monitor) - + # Validate health monitor update options for UDP-CONNECT type. 
+ if (pool.protocol == consts.PROTOCOL_UDP and + db_hm.type == consts.HEALTH_MONITOR_UDP_CONNECT): + self._validate_healthmonitor_request_for_udp(health_monitor) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) diff --git a/octavia/api/v2/controllers/l7policy.py b/octavia/api/v2/controllers/l7policy.py index c67bef4604..b8044bb1ce 100644 --- a/octavia/api/v2/controllers/l7policy.py +++ b/octavia/api/v2/controllers/l7policy.py @@ -32,6 +32,7 @@ from octavia.common import exceptions from octavia.common import validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare +from octavia.i18n import _ CONF = cfg.CONF @@ -114,6 +115,12 @@ class L7PolicyController(base.BaseController): # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') + def _escape_l7policy_udp_pool_request(self, pool): + if pool.protocol == constants.PROTOCOL_UDP: + raise exceptions.ValidationException( + detail=_("%s protocol pool can not be assigned to " + "l7policy.") % constants.PROTOCOL_UDP) + @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, body=l7policy_types.L7PolicyRootPOST, status_code=201) def post(self, l7policy_): @@ -122,8 +129,9 @@ class L7PolicyController(base.BaseController): context = pecan.request.context.get('octavia_context') # Make sure any pool specified by redirect_pool_id exists if l7policy.redirect_pool_id: - self._get_db_pool( + db_pool = self._get_db_pool( context.session, l7policy.redirect_pool_id) + self._escape_l7policy_udp_pool_request(db_pool) # Verify the parent listener exists listener_id = l7policy.listener_id listener = self._get_db_listener( @@ -212,8 +220,9 @@ class L7PolicyController(base.BaseController): context = pecan.request.context.get('octavia_context') # Make sure any specified redirect_pool_id exists if l7policy_dict.get('redirect_pool_id'): - self._get_db_pool( + db_pool = self._get_db_pool( context.session, 
l7policy_dict['redirect_pool_id']) + self._escape_l7policy_udp_pool_request(db_pool) db_l7policy = self._get_db_l7policy(context.session, id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( diff --git a/octavia/api/v2/controllers/listener.py b/octavia/api/v2/controllers/listener.py index 2eb9362a12..30fd3705cd 100644 --- a/octavia/api/v2/controllers/listener.py +++ b/octavia/api/v2/controllers/listener.py @@ -34,6 +34,7 @@ from octavia.common import exceptions from octavia.common import stats from octavia.db import api as db_api from octavia.db import prepare as db_prepare +from octavia.i18n import _ CONF = cfg.CONF @@ -113,13 +114,18 @@ class ListenersController(base.BaseController): raise exceptions.ImmutableObject(resource=db_lb._name(), id=lb_id) - def _validate_pool(self, session, lb_id, pool_id): + def _validate_pool(self, session, lb_id, pool_id, listener_protocol): """Validate pool given exists on same load balancer as listener.""" db_pool = self.repositories.pool.get( session, load_balancer_id=lb_id, id=pool_id) if not db_pool: raise exceptions.NotFound( resource=data_models.Pool._name(), id=pool_id) + if (db_pool.protocol == constants.PROTOCOL_UDP and + db_pool.protocol != listener_protocol): + msg = _("Listeners of type %s can only have pools of " + "type UDP.") % constants.PROTOCOL_UDP + raise exceptions.ValidationException(detail=msg) def _reset_lb_status(self, session, lb_id): # Setting LB back to active because this should be a recoverable error @@ -183,6 +189,10 @@ class ListenersController(base.BaseController): raise exceptions.InvalidOption(value=listener_dict.get('protocol'), option='protocol') + def _is_tls_or_insert_header(self, listener): + return (listener.default_tls_container_ref or + listener.sni_container_refs or listener.insert_headers) + @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, body=listener_types.ListenerRootPOST, status_code=201) def post(self, listener_): @@ -196,7 +206,11 
@@ class ListenersController(base.BaseController): self._auth_validate_action(context, listener.project_id, constants.RBAC_POST) - + if (listener.protocol == constants.PROTOCOL_UDP and + self._is_tls_or_insert_header(listener)): + raise exceptions.ValidationException(detail=_( + "%s protocol listener does not support TLS or header " + "insertion.") % constants.PROTOCOL_UDP) if (not CONF.api_settings.allow_tls_terminated_listeners and listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS): raise exceptions.DisabledOption( @@ -220,7 +234,8 @@ class ListenersController(base.BaseController): if listener_dict['default_pool_id']: self._validate_pool(context.session, load_balancer_id, - listener_dict['default_pool_id']) + listener_dict['default_pool_id'], + listener.protocol) self._test_lb_and_listener_statuses( lock_session, lb_id=load_balancer_id) @@ -260,7 +275,8 @@ class ListenersController(base.BaseController): l7policies = listener_dict.pop('l7policies', l7policies) if listener_dict.get('default_pool_id'): self._validate_pool(lock_session, load_balancer_id, - listener_dict['default_pool_id']) + listener_dict['default_pool_id'], + listener_dict['protocol']) db_listener = self._validate_create_listener( lock_session, listener_dict) @@ -304,9 +320,15 @@ class ListenersController(base.BaseController): raise exceptions.ValidationException( detail='No listener object supplied.') + if (db_listener.protocol == constants.PROTOCOL_UDP and + self._is_tls_or_insert_header(listener)): + raise exceptions.ValidationException(detail=_( + "%s protocol listener does not support TLS or header " + "insertion.") % constants.PROTOCOL_UDP) + if listener.default_pool_id: self._validate_pool(context.session, load_balancer_id, - listener.default_pool_id) + listener.default_pool_id, db_listener.protocol) sni_containers = listener.sni_container_refs or [] tls_refs = [sni for sni in sni_containers] diff --git a/octavia/api/v2/controllers/pool.py b/octavia/api/v2/controllers/pool.py index 
594dd0bce3..3c78a29e0c 100644 --- a/octavia/api/v2/controllers/pool.py +++ b/octavia/api/v2/controllers/pool.py @@ -127,6 +127,52 @@ class PoolsController(base.BaseController): # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') + def _is_only_specified_in_request(self, request, **kwargs): + request_attrs = [] + check_attrs = kwargs['check_exist_attrs'] + escaped_attrs = ['from_data_model', + 'translate_dict_keys_to_data_model', 'to_dict'] + + for attr in dir(request): + if attr.startswith('_') or attr in escaped_attrs: + continue + else: + request_attrs.append(attr) + + for req_attr in request_attrs: + if (getattr( + request, req_attr) and req_attr not in check_attrs) or ( + not getattr( + request, req_attr) and req_attr in check_attrs): + return False + return True + + def _validate_pool_request_for_udp(self, request): + if request.session_persistence: + if (request.session_persistence.type == + constants.SESSION_PERSISTENCE_SOURCE_IP and + not self._is_only_specified_in_request( + request.session_persistence, + check_exist_attrs=['type', 'persistence_timeout', + 'persistence_granularity'])): + raise exceptions.ValidationException(detail=_( + "session_persistence %s type for UDP protocol " + "only accepts: type, persistence_timeout, " + "persistence_granularity.") % ( + constants.SESSION_PERSISTENCE_SOURCE_IP)) + elif request.session_persistence.cookie_name: + raise exceptions.ValidationException(detail=_( + "Cookie names are not supported for %s pools.") % + constants.PROTOCOL_UDP) + elif request.session_persistence.type in [ + constants.SESSION_PERSISTENCE_HTTP_COOKIE, + constants.SESSION_PERSISTENCE_APP_COOKIE]: + raise exceptions.ValidationException(detail=_( + "Session persistence of type %(type)s is not supported " + "for %(protocol)s protocol pools.") % { + 'type': request.session_persistence.type, + 'protocol': constants.PROTOCOL_UDP}) + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, 
body=pool_types.PoolRootPOST, status_code=201) def post(self, pool_): @@ -141,7 +187,15 @@ class PoolsController(base.BaseController): # pool_dict: pool = pool_.pool context = pecan.request.context.get('octavia_context') - + if pool.protocol == constants.PROTOCOL_UDP: + self._validate_pool_request_for_udp(pool) + else: + if (pool.session_persistence and ( + pool.session_persistence.persistence_timeout or + pool.session_persistence.persistence_granularity)): + raise exceptions.ValidationException(detail=_( + "persistence_timeout and persistence_granularity " + "is only for UDP protocol pools.")) if pool.loadbalancer_id: pool.project_id, provider = self._get_lb_project_id_provider( context.session, pool.loadbalancer_id) @@ -265,7 +319,21 @@ class PoolsController(base.BaseController): project_id, provider = self._get_lb_project_id_provider( context.session, db_pool.load_balancer_id) + if (pool.session_persistence and + not pool.session_persistence.type and + db_pool.session_persistence and + db_pool.session_persistence.type): + pool.session_persistence.type = db_pool.session_persistence.type self._auth_validate_action(context, project_id, constants.RBAC_PUT) + if db_pool.protocol == constants.PROTOCOL_UDP: + self._validate_pool_request_for_udp(pool) + else: + if (pool.session_persistence and ( + pool.session_persistence.persistence_timeout or + pool.session_persistence.persistence_granularity)): + raise exceptions.ValidationException(detail=_( + "persistence_timeout and persistence_granularity " + "is only for UDP protocol pools.")) if pool.session_persistence: sp_dict = pool.session_persistence.to_dict(render_unsets=False) diff --git a/octavia/api/v2/types/pool.py b/octavia/api/v2/types/pool.py index 7346c9f064..9d574f9531 100644 --- a/octavia/api/v2/types/pool.py +++ b/octavia/api/v2/types/pool.py @@ -24,6 +24,8 @@ class SessionPersistenceResponse(types.BaseType): """Defines which attributes are to be shown on any response.""" type = wtypes.wsattr(wtypes.text) 
cookie_name = wtypes.wsattr(wtypes.text) + persistence_timeout = wtypes.wsattr(wtypes.IntegerType()) + persistence_granularity = wtypes.wsattr(types.IPAddressType()) class SessionPersistencePOST(types.BaseType): @@ -32,6 +34,9 @@ class SessionPersistencePOST(types.BaseType): mandatory=True) cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255), default=None) + persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None) + persistence_granularity = wtypes.wsattr(types.IPAddressType(), + default=None) class SessionPersistencePUT(types.BaseType): @@ -39,6 +44,9 @@ class SessionPersistencePUT(types.BaseType): type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES)) cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255), default=None) + persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None) + persistence_granularity = wtypes.wsattr(types.IPAddressType(), + default=None) class BasePoolType(types.BaseType): diff --git a/octavia/common/config.py b/octavia/common/config.py index 925d93ad5f..67884ff929 100644 --- a/octavia/common/config.py +++ b/octavia/common/config.py @@ -114,6 +114,11 @@ api_opts = [ 'driver.'}), cfg.StrOpt('default_provider_driver', default='amphora', help=_('Default provider driver.')), + cfg.IntOpt('udp_connect_min_interval_health_monitor', + default=3, + help=_("The minimum health monitor delay interval for the " + "UDP-CONNECT Health Monitor type. 
A negative integer " + "value means 'no limit'.")), ] # Options only used by the amphora agent diff --git a/octavia/common/constants.py b/octavia/common/constants.py index dd49d8c8d1..f33b6a38c3 100644 --- a/octavia/common/constants.py +++ b/octavia/common/constants.py @@ -32,9 +32,11 @@ HEALTH_MONITOR_HTTP = 'HTTP' HEALTH_MONITOR_HTTPS = 'HTTPS' HEALTH_MONITOR_TLS_HELLO = 'TLS-HELLO' HEALTH_MONITOR_UDP_CONNECT = 'UDP-CONNECT' +UDP_CONNECT_SCRIPT_MIN_INTERVAL = 3 SUPPORTED_HEALTH_MONITOR_TYPES = (HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS, HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP, - HEALTH_MONITOR_TLS_HELLO) + HEALTH_MONITOR_TLS_HELLO, + HEALTH_MONITOR_UDP_CONNECT) HEALTH_MONITOR_HTTP_METHOD_GET = 'GET' HEALTH_MONITOR_HTTP_METHOD_HEAD = 'HEAD' HEALTH_MONITOR_HTTP_METHOD_POST = 'POST' @@ -72,7 +74,7 @@ PROTOCOL_HTTPS = 'HTTPS' PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS' PROTOCOL_PROXY = 'PROXY' SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP, - PROTOCOL_TERMINATED_HTTPS, PROTOCOL_PROXY) + PROTOCOL_TERMINATED_HTTPS, PROTOCOL_PROXY, PROTOCOL_UDP) # API Integer Ranges MIN_PORT_NUMBER = 1 diff --git a/octavia/common/data_models.py b/octavia/common/data_models.py index 38d6067303..318ec73634 100644 --- a/octavia/common/data_models.py +++ b/octavia/common/data_models.py @@ -149,11 +149,14 @@ class BaseDataModel(object): class SessionPersistence(BaseDataModel): def __init__(self, pool_id=None, type=None, cookie_name=None, - pool=None): + pool=None, persistence_timeout=None, + persistence_granularity=None): self.pool_id = pool_id self.type = type self.cookie_name = cookie_name self.pool = pool + self.persistence_timeout = persistence_timeout + self.persistence_granularity = persistence_granularity def delete(self): self.pool.session_persistence = None diff --git a/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py b/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py new 
file mode 100644 index 0000000000..0297ca3b16 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py @@ -0,0 +1,62 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extend some necessary fields for udp support + +Revision ID: 76aacf2e176c +Revises: ebbcc72b4e5e +Create Date: 2018-01-01 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '76aacf2e176c' +down_revision = 'ebbcc72b4e5e' + +tables = [u'protocol', u'health_monitor_type'] +new_fields = ['UDP', 'UDP-CONNECT'] + + +def upgrade(): + # New UDP protocol addition. + # New UDP_CONNECT healthmonitor type addition. 
+ for table, new_field in zip(tables, new_fields): + insert_table = sql.table( + table, + sql.column(u'name', sa.String), + sql.column(u'description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': new_field} + ] + ) + + # Two new columns add to session_persistence table + op.add_column('session_persistence', + sa.Column('persistence_timeout', + sa.Integer(), + nullable=True, server_default=None)) + op.add_column('session_persistence', + sa.Column('persistence_granularity', + sa.String(length=64), + nullable=True, server_default=None)) diff --git a/octavia/db/models.py b/octavia/db/models.py index d7adff989b..c7924e81e7 100644 --- a/octavia/db/models.py +++ b/octavia/db/models.py @@ -130,6 +130,8 @@ class SessionPersistence(base_models.BASE): name="fk_session_persistence_session_persistence_type_name"), nullable=False) cookie_name = sa.Column(sa.String(255), nullable=True) + persistence_timeout = sa.Column(sa.Integer(), nullable=True) + persistence_granularity = sa.Column(sa.String(64), nullable=True) pool = orm.relationship("Pool", uselist=False, backref=orm.backref("session_persistence", uselist=False, diff --git a/octavia/tests/functional/api/v2/test_health_monitor.py b/octavia/tests/functional/api/v2/test_health_monitor.py index fb8a10c9a4..d78708ef93 100644 --- a/octavia/tests/functional/api/v2/test_health_monitor.py +++ b/octavia/tests/functional/api/v2/test_health_monitor.py @@ -53,6 +53,30 @@ class TestHealthMonitor(base.BaseAPITest): self.pool_with_listener_id = ( self.pool_with_listener.get('pool').get('id')) self.set_lb_status(self.lb_id) + self._setup_udp_lb_resources() + + def _setup_udp_lb_resources(self): + self.udp_lb = self.create_load_balancer(uuidutils.generate_uuid()).get( + 'loadbalancer') + self.udp_lb_id = self.udp_lb.get('id') + self.set_lb_status(self.udp_lb_id) + + self.udp_listener = self.create_listener( + constants.PROTOCOL_UDP, 8888, + self.udp_lb_id).get('listener') + self.udp_listener_id = 
self.udp_listener.get('id') + self.set_lb_status(self.udp_lb_id) + + self.udp_pool_with_listener = self.create_pool( + None, constants.PROTOCOL_UDP, constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.udp_listener_id) + self.udp_pool_with_listener_id = ( + self.udp_pool_with_listener.get('pool').get('id')) + self.set_lb_status(self.udp_lb_id) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group='api_settings', + udp_connect_min_interval_health_monitor='3') def test_get(self): api_hm = self.create_health_monitor( @@ -656,6 +680,144 @@ class TestHealthMonitor(base.BaseAPITest): self.assertEqual('/', api_hm.get('url_path')) self.assertEqual('200', api_hm.get('expected_codes')) + def test_create_udp_case(self): + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.udp_lb_id) + self.assertEqual(constants.HEALTH_MONITOR_UDP_CONNECT, + api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify the L7 fields is None + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes')) + + def test_udp_case_when_udp_connect_min_interval_health_monitor_set(self): + # negative case first + req_dict = {'pool_id': self.udp_pool_with_listener_id, + 'type': constants.HEALTH_MONITOR_UDP_CONNECT, + 'delay': 1, + 'timeout': 1, + 
'max_retries_down': 1, + 'max_retries': 1} + res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, + expect_errors=True) + expect_error_msg = ("Validation failure: The request delay value 1 " + "should be larger than 3 for %s health monitor " + "type.") % constants.HEALTH_MONITOR_UDP_CONNECT + + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + # pass cases + self.conf.config( + group='api_settings', + udp_connect_min_interval_health_monitor='-3') + res = self.post(self.HMS_PATH, self._build_body(req_dict)) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, + hm_id=res.json['healthmonitor']['id'], + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + + def test_negative_create_udp_case(self): + req_dict = {'pool_id': self.udp_pool_with_listener_id, + 'delay': 3, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1} + expect_error_msg = ("Validation failure: The associated pool protocol " + "is %(pool_protocol)s, so only a %(type)s health " + "monitor is supported.") % { + 'pool_protocol': constants.PROTOCOL_UDP, + 'type': constants.HEALTH_MONITOR_UDP_CONNECT} + + # Not allowed types, url_path, expected_codes specified. 
+ update_req = {'type': constants.HEALTH_MONITOR_TCP} + req_dict.update(update_req) + res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + update_req = {'type': constants.HEALTH_MONITOR_UDP_CONNECT} + req_dict.update(update_req) + for req in [{'http_method': + constants.HEALTH_MONITOR_HTTP_METHOD_GET}, + {'url_path': constants.HEALTH_MONITOR_DEFAULT_URL_PATH}, + {'expected_codes': + constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES}]: + req_dict.update(req) + res = self.post(self.HMS_PATH, self._build_body(req_dict), + status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + # Hit error during create with a non-UDP pool + req_dict = {'pool_id': self.pool_with_listener_id, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'type': constants.HEALTH_MONITOR_UDP_CONNECT} + expect_error_msg = ("Validation failure: The %(type)s type is only " + "supported for pools of type " + "%(protocol)s.") % { + 'type': constants.HEALTH_MONITOR_UDP_CONNECT, + 'protocol': constants.PROTOCOL_UDP} + res = self.post(self.HMS_PATH, self._build_body(req_dict), + status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + def test_ensure_L7_fields_filled_during_create(self): + # Create a health monitor with a load balancer pool + api_hm = self.create_health_monitor( + self.pool_id, + constants.PROTOCOL_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, 
listener_id=self.listener_id, + pool_id=self.pool_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP_DEFAULT_METHOD, + api_hm.get('http_method')) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH, + api_hm.get('url_path')) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES, + api_hm.get('expected_codes')) + def test_create_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') @@ -1037,6 +1199,61 @@ class TestHealthMonitor(base.BaseAPITest): pool_prov_status=constants.ACTIVE, hm_prov_status=constants.ACTIVE) + def test_update_udp_case(self): + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get( + self.root_tag) + self.set_lb_status(self.udp_lb_id) + new_hm = {'max_retries': 2} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + + def test_negative_update_udp_case(self): + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get( + self.root_tag) + self.set_lb_status(self.udp_lb_id) + + # Hit error during update with invalid parameter + req_dict = {'delay': 3, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET} + expect_error_msg = (("http_method is 
not a valid option for health " + "monitors of type %s") % + constants.HEALTH_MONITOR_UDP_CONNECT) + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + # Hit error during update with smaller delay value + req_dict = {'delay': 1} + expect_error_msg = ("Validation failure: The request delay value 1 " + "should be larger than 3 for %s health monitor " + "type.") % constants.HEALTH_MONITOR_UDP_CONNECT + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + def test_bad_update(self): api_hm = self.create_health_monitor(self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, diff --git a/octavia/tests/functional/api/v2/test_l7policy.py b/octavia/tests/functional/api/v2/test_l7policy.py index 35fc392c11..5b1d960fe5 100644 --- a/octavia/tests/functional/api/v2/test_l7policy.py +++ b/octavia/tests/functional/api/v2/test_l7policy.py @@ -626,6 +626,21 @@ class TestL7Policy(base.BaseAPITest): 'redirect_pool_id': uuidutils.generate_uuid()} self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=404) + def test_bad_create_redirect_to_udp_pool(self): + udp_pool_id = self.create_pool( + self.lb_id, + constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id') + l7policy = { + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'listener_id': self.listener_id, + 'redirect_pool_id': udp_pool_id} + res = self.post(self.L7POLICIES_PATH, self._build_body(l7policy), + status=400, expect_errors=True) + 
expect_error_msg = ("Validation failure: %s protocol pool can not be " + "assigned to l7policy.") % constants.PROTOCOL_UDP + self.assertEqual(expect_error_msg, res.json['faultstring']) + def test_bad_create_redirect_to_url(self): l7policy = {'listener_id': self.listener_id, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, @@ -764,6 +779,27 @@ class TestL7Policy(base.BaseAPITest): l7policy_id=api_l7policy.get('id')), self._build_body(new_l7policy), status=400) + def test_bad_update_redirect_to_udp_pool(self): + api_l7policy = self.create_l7policy(self.listener_id, + constants.L7POLICY_ACTION_REJECT, + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + udp_pool_id = self.create_pool( + self.lb_id, + constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id') + self.set_lb_status(self.lb_id) + new_l7policy = { + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool_id': udp_pool_id} + res = self.put(self.L7POLICY_PATH.format( + l7policy_id=api_l7policy.get('id')), + self._build_body(new_l7policy), + status=400, expect_errors=True) + expect_error_msg = ("Validation failure: %s protocol pool can not be " + "assigned to l7policy.") % constants.PROTOCOL_UDP + self.assertEqual(expect_error_msg, res.json['faultstring']) + def test_bad_update_redirect_to_url(self): api_l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, diff --git a/octavia/tests/functional/api/v2/test_listener.py b/octavia/tests/functional/api/v2/test_listener.py index a7e1983c0d..61d4db689b 100644 --- a/octavia/tests/functional/api/v2/test_listener.py +++ b/octavia/tests/functional/api/v2/test_listener.py @@ -539,6 +539,60 @@ class TestListener(base.BaseAPITest): 'Value should be greater or equal to {0}'.format( constants.MIN_TIMEOUT), fault) + def test_create_udp_case(self): + api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666, + self.lb_id).get(self.root_tag) + self.assertEqual(constants.PROTOCOL_UDP, 
api_listener.get('protocol')) + self.assertEqual(6666, api_listener.get('protocol_port')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=api_listener.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_CREATE, + listener_op_status=constants.OFFLINE) + + def test_negative_create_udp_case(self): + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + req_dict = {'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_UDP, + 'protocol_port': 6666, 'connection_limit': 10, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'insert_headers': { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true"}, + 'loadbalancer_id': self.lb_id} + expect_error_msg = ( + "Validation failure: %s protocol listener does not support TLS or " + "header insertion.") % constants.PROTOCOL_UDP + res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + # Default pool protocol is udp which is different with listener + # protocol. 
+ udp_pool_id = self.create_pool( + self.lb_id, constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id') + self.set_lb_status(self.lb_id) + lb_listener = {'name': 'listener1', + 'default_pool_id': udp_pool_id, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + expect_error_msg = ("Validation failure: Listeners of type %s can " + "only have pools of " + "type UDP.") % constants.PROTOCOL_UDP + res = self.post(self.LISTENERS_PATH, self._build_body(lb_listener), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + def test_create_duplicate_fails(self): self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) @@ -830,6 +884,31 @@ class TestListener(base.BaseAPITest): self.assert_final_listener_statuses(self.lb_id, api_listener['id']) + def test_negative_update_udp_case(self): + api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666, + self.lb_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + new_listener = {'name': 'new-listener', + 'admin_state_up': True, + 'connection_limit': 10, + 'default_tls_container_ref': + uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'insert_headers': { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true"}} + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + expect_error_msg = ( + "Validation failure: %s protocol listener does not support TLS or " + "header insertion.") % constants.PROTOCOL_UDP + res = self.put(listener_path, self._build_body(new_listener), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + def test_update_bad_listener_id(self): 
self.put(self.listener_path.format(listener_id='SEAN-CONNERY'), body={}, status=404) diff --git a/octavia/tests/functional/api/v2/test_pool.py b/octavia/tests/functional/api/v2/test_pool.py index cd8d2dcbd1..0bb857cdd9 100644 --- a/octavia/tests/functional/api/v2/test_pool.py +++ b/octavia/tests/functional/api/v2/test_pool.py @@ -47,6 +47,19 @@ class TestPool(base.BaseAPITest): self.listener_id = self.listener.get('id') self.set_lb_status(self.lb_id) + self._setup_udp_lb_resources() + + def _setup_udp_lb_resources(self): + self.udp_lb = self.create_load_balancer(uuidutils.generate_uuid()).get( + 'loadbalancer') + self.udp_lb_id = self.udp_lb.get('id') + self.set_lb_status(self.udp_lb_id) + + self.udp_listener = self.create_listener( + constants.PROTOCOL_UDP, 8888, + self.udp_lb_id).get('listener') + self.udp_listener_id = self.udp_listener.get('id') + self.set_lb_status(self.udp_lb_id) def test_get(self): api_pool = self.create_pool( @@ -717,6 +730,120 @@ class TestPool(base.BaseAPITest): **optionals).get(self.root_tag) self.assertEqual(self.project_id, api_pool.get('project_id')) + def test_create_udp_case_source_ip(self): + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + api_pool = self.create_pool( + None, + constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.udp_listener_id, + session_persistence=sp).get(self.root_tag) + self.assertEqual(constants.PROTOCOL_UDP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assertEqual(constants.SESSION_PERSISTENCE_SOURCE_IP, + api_pool.get('session_persistence')['type']) + self.assertEqual(3, api_pool.get( + 'session_persistence')['persistence_timeout']) + self.assertEqual('255.255.255.0', api_pool.get( + 'session_persistence')['persistence_granularity']) + self.assertIsNone(api_pool.get( + 'session_persistence')['cookie_name']) + 
self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + + def test_negative_create_udp_case(self): + # Error create pool with udp protocol but non-udp-type + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": 'test-cookie-name'} + req_dict = { + 'listener_id': self.udp_listener_id, + 'protocol': constants.PROTOCOL_UDP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + expect_error_msg = ("Validation failure: Cookie names are not " + "supported for %s pools.") % constants.PROTOCOL_UDP + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error create pool with any non-udp-types and udp session persistence + # options. 
+ sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + req_dict = { + 'listener_id': self.udp_listener_id, + 'protocol': constants.PROTOCOL_UDP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': None} + for type in [constants.SESSION_PERSISTENCE_HTTP_COOKIE, + constants.SESSION_PERSISTENCE_APP_COOKIE]: + expect_error_msg = ("Validation failure: Session persistence of " + "type %s is not supported for %s protocol " + "pools.") % (type, constants.PROTOCOL_UDP) + sp.update({'type': type}) + req_dict['session_persistence'] = sp + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error create pool with source ip session persistence and wrong + # options. + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0', + "cookie_name": 'test-cookie-name'} + req_dict = { + 'listener_id': self.udp_listener_id, + 'protocol': constants.PROTOCOL_UDP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + expect_error_msg = ( + "Validation failure: session_persistence %s type for %s " + "protocol only accepts: type, persistence_timeout, " + "persistence_granularity.") % ( + constants.SESSION_PERSISTENCE_SOURCE_IP, constants.PROTOCOL_UDP) + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error create non-udp pool with udp session persistence + sps = [{"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'}, + 
{"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'}] + req_dict = { + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN} + expect_error_msg = ("Validation failure: persistence_timeout and " + "persistence_granularity is only for %s protocol " + "pools.") % constants.PROTOCOL_UDP + for s in sps: + req_dict.update({'session_persistence': s}) + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id) + def test_bad_create(self): pool = {'name': 'test1'} self.post(self.POOLS_PATH, self._build_body(pool), status=400) @@ -869,6 +996,115 @@ class TestPool(base.BaseAPITest): self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.ACTIVE) + def test_update_get_session_persistence_from_db_if_no_request(self): + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + optionals = {"listener_id": self.udp_listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + None, + constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.udp_lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + sess_p = response.get('session_persistence') + ty = sess_p.pop('type') + sess_p['persistence_timeout'] = 4 + sess_p['persistence_granularity'] = "255.255.0.0" + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool)) + sess_p['type'] = ty + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + self.assertEqual(sess_p, 
response.get('session_persistence')) + self.assert_correct_status( + listener_id=self.udp_listener_id, + pool_id=api_pool.get('id'), + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE) + + def test_update_udp_case_source_ip(self): + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + optionals = {"listener_id": self.udp_listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + None, + constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.udp_lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + sess_p = response.get('session_persistence') + sess_p['persistence_timeout'] = 4 + sess_p['persistence_granularity'] = "255.255.0.0" + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool)) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + self.assertEqual(sess_p, response.get('session_persistence')) + self.assert_correct_status( + listener_id=self.udp_listener_id, + pool_id=api_pool.get('id'), + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE) + + self.set_lb_status(self.udp_lb_id) + self.set_object_status(self.pool_repo, api_pool.get('id')) + # Negative cases + # Error during update pool with non-UDP type and cookie_name. 
+ expect_error_msg = ( + "Validation failure: Cookie names are not supported for %s" + " pools.") % constants.PROTOCOL_UDP + sess_p['type'] = constants.SESSION_PERSISTENCE_HTTP_COOKIE + sess_p['cookie_name'] = 'test-cookie-name' + new_pool = {'session_persistence': sess_p} + res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error during update pool with source ip type and more options. + expect_error_msg = ( + "Validation failure: session_persistence %s type for %s protocol " + "only accepts: type, persistence_timeout, " + "persistence_granularity.") % ( + constants.SESSION_PERSISTENCE_SOURCE_IP, constants.PROTOCOL_UDP) + sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP + sess_p['cookie_name'] = 'test-cookie-name' + sess_p['persistence_timeout'] = 4 + sess_p['persistence_granularity'] = "255.255.0.0" + res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error during update pool with non-UDP session persistence type. 
+ sess_p['cookie_name'] = None + for ty in [constants.SESSION_PERSISTENCE_APP_COOKIE, + constants.SESSION_PERSISTENCE_HTTP_COOKIE]: + expect_error_msg = ("Validation failure: Session persistence of " + "type %s is not supported for %s protocol " + "pools.") % (ty, constants.PROTOCOL_UDP) + sess_p['type'] = ty + res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + def test_bad_update(self): api_pool = self.create_pool( self.lb_id, @@ -899,6 +1135,27 @@ class TestPool(base.BaseAPITest): self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) + def test_bad_update_non_udp_pool_with_udp_fields(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + self.set_lb_status(self.lb_id) + new_pool = {'session_persistence': sp} + expect_error_msg = ("Validation failure: persistence_timeout and " + "persistence_granularity is only for %s " + "protocol pools.") % constants.PROTOCOL_UDP + res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + def test_delete(self): api_pool = self.create_pool( self.lb_id, @@ -1131,7 +1388,9 @@ class TestPool(base.BaseAPITest): def test_add_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, - "cookie_name": "test_cookie_name"} + "cookie_name": "test_cookie_name", + 
'persistence_granularity': None, + 'persistence_timeout': None} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, @@ -1182,7 +1441,9 @@ class TestPool(base.BaseAPITest): def test_update_preserve_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, - "cookie_name": "test_cookie_name"} + "cookie_name": "test_cookie_name", + 'persistence_granularity': None, + 'persistence_timeout': None} optionals = {"listener_id": self.listener_id, "name": "name", "session_persistence": sp} api_pool = self.create_pool( diff --git a/octavia/tests/functional/db/test_repositories.py b/octavia/tests/functional/db/test_repositories.py index df88e3a94f..82f03cfb75 100644 --- a/octavia/tests/functional/db/test_repositories.py +++ b/octavia/tests/functional/db/test_repositories.py @@ -185,7 +185,9 @@ class AllRepositoriesTest(base.OctaviaDBTestBase): 'provisioning_status': constants.ACTIVE} sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'cookie_monster', - 'pool_id': pool['id']} + 'pool_id': pool['id'], + 'persistence_granularity': None, + 'persistence_timeout': None} pool.update({'session_persistence': sp}) pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) @@ -248,7 +250,9 @@ class AllRepositoriesTest(base.OctaviaDBTestBase): 'provisioning_status': constants.ACTIVE} sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'cookie_monster', - 'pool_id': pool['id']} + 'pool_id': pool['id'], + 'persistence_granularity': None, + 'persistence_timeout': None} pool.update({'session_persistence': sp}) pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) @@ -287,7 +291,9 @@ class AllRepositoriesTest(base.OctaviaDBTestBase): self.session, pool, listener_id=self.listener.id) update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} update_sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, - 
'cookie_name': 'monster_cookie'} + 'cookie_name': 'monster_cookie', + 'persistence_granularity': None, + 'persistence_timeout': None} update_pool.update({'session_persistence': update_sp}) new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool)