diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 3127352c3e..c6778368e1 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -391,13 +391,15 @@ crl_container_ref-optional:
   type: string
 default_pool_id:
   description: |
-    The ID of the pool used by the listener if no L7 policies match.
+    The ID of the pool used by the listener if no L7 policies match. The pool
+    has some restrictions. See :ref:`valid_protocol`.
   in: body
   required: true
   type: uuid
 default_pool_id-optional:
   description: |
-    The ID of the pool used by the listener if no L7 policies match.
+    The ID of the pool used by the listener if no L7 policies match. The pool
+    has some restrictions. See :ref:`valid_protocol`.
   in: body
   required: false
   type: uuid
@@ -779,14 +781,16 @@ l7policy-redirect-http-code-optional:
 l7policy-redirect-pool_id:
   description: |
     Requests matching this policy will be redirected to the pool with this ID.
-    Only valid if ``action`` is ``REDIRECT_TO_POOL``.
+    Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some
+    restrictions. See :ref:`valid_protocol`.
   in: body
   required: true
   type: uuid
 l7policy-redirect-pool_id-optional:
   description: |
     Requests matching this policy will be redirected to the pool with this ID.
-    Only valid if ``action`` is ``REDIRECT_TO_POOL``.
+    Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some
+    restrictions. See :ref:`valid_protocol`.
   in: body
   required: false
   type: uuid
@@ -948,7 +952,8 @@ listener-id:
 listener-id-pool-optional:
   description: |
     The ID of the listener for the pool. Either ``listener_id`` or
-    ``loadbalancer_id`` must be specified.
+    ``loadbalancer_id`` must be specified. The listener has some restrictions.
+    See :ref:`valid_protocol`.
   in: body
   required: false
   type: uuid
diff --git a/api-ref/source/v2/general.inc b/api-ref/source/v2/general.inc
index 902606c26c..2b17751e69 100644
--- a/api-ref/source/v2/general.inc
+++ b/api-ref/source/v2/general.inc
@@ -569,3 +569,55 @@ provisioning status once the asynchronus operation completes.
 
 An entity in ``ERROR`` has failed provisioning. The entity may be deleted and
 recreated.
+
+
+.. _valid_protocol:
+
+Protocol Combinations
+=====================
+
+A listener and a pool can be associated through the listener's
+``default_pool_id`` or an l7policy's ``redirect_pool_id``. Both the listener
+and the pool must set the ``protocol`` parameter, but the two cannot be
+associated arbitrarily: only certain protocol combinations are valid.
+
+Valid protocol combinations
+---------------------------
+
+.. |1| unicode:: U+2002 .. nut ( )
+.. |2| unicode:: U+2003 .. mutton ( )
+.. |listener| replace:: |2| |2| Listener
+.. |1Y| replace:: |1| Y
+.. |1N| replace:: |1| N
+.. |2Y| replace:: |2| Y
+.. |2N| replace:: |2| N
+.. |8Y| replace:: |2| |2| |2| |2| Y
+.. |8N| replace:: |2| |2| |2| |2| N
+
++-------------+-------+--------+------+-------------------+------+
+|| |listener| || HTTP || HTTPS || TCP || TERMINATED_HTTPS || UDP |
+|| Pool       ||      ||       ||     ||                  ||     |
++=============+=======+========+======+===================+======+
+| HTTP        | |2Y|  | |2N|   | |1Y| | |8Y|              | |1N| |
++-------------+-------+--------+------+-------------------+------+
+| HTTPS       | |2N|  | |2Y|   | |1Y| | |8N|              | |1N| |
++-------------+-------+--------+------+-------------------+------+
+| PROXY       | |2Y|  | |2Y|   | |1Y| | |8Y|              | |1N| |
++-------------+-------+--------+------+-------------------+------+
+| TCP         | |2N|  | |2Y|   | |1Y| | |8N|              | |1N| |
++-------------+-------+--------+------+-------------------+------+
+| UDP         | |2N|  | |2N|   | |1N| | |8N|              | |1Y| |
++-------------+-------+--------+------+-------------------+------+
+
+"Y" means the combination is valid and "N" means it is invalid.
+
+The HTTPS protocol is HTTPS pass-through. For most providers, this is treated
+as a TCP protocol. Some advanced providers may support HTTPS session
+persistence features by using the session ID. The Amphora provider treats
+HTTPS as a TCP flow, but currently does not support HTTPS session persistence
+using the session ID.
+
+A pool protocol of PROXY will use the listener protocol as the pool protocol,
+but will wrap that protocol in the PROXY protocol. In the case of a
+TERMINATED_HTTPS listener, a pool protocol of PROXY will be HTTP wrapped in
+the PROXY protocol.
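
As a quick cross-check of the table above, the same rules can be written as a
small standalone Python snippet (an illustration only, not part of the patch;
the plain strings mirror the values of the ``PROTOCOL_*`` constants used by
the Octavia API)::

    # Keys are listener protocols; values are the pool protocols each
    # listener protocol accepts, exactly as in the table above.
    VALID_COMBINATIONS = {
        'TCP': ['HTTP', 'HTTPS', 'PROXY', 'TCP'],
        'HTTP': ['HTTP', 'PROXY'],
        'HTTPS': ['HTTPS', 'PROXY', 'TCP'],
        'TERMINATED_HTTPS': ['HTTP', 'PROXY'],
        'UDP': ['UDP'],
    }

    def is_valid(listener_protocol, pool_protocol):
        # A combination is valid when the pool protocol appears in the
        # allowed list for the listener protocol.
        return pool_protocol in VALID_COMBINATIONS.get(listener_protocol, [])

    assert is_valid('TERMINATED_HTTPS', 'HTTP')  # "Y" in the table
    assert not is_valid('HTTP', 'UDP')           # "N" in the table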
diff --git a/octavia/api/v2/controllers/base.py b/octavia/api/v2/controllers/base.py
index 60b334ff17..88a6002c9b 100644
--- a/octavia/api/v2/controllers/base.py
+++ b/octavia/api/v2/controllers/base.py
@@ -307,3 +307,15 @@ class BaseController(pecan.rest.RestController):
             raise exceptions.ValidationException(detail=_(
                 "The CRL specified is not valid for client certificate "
                 "authority reference supplied."))
+
+    @staticmethod
+    def _validate_protocol(listener_protocol, pool_protocol):
+        proto_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        for valid_pool_proto in proto_map[listener_protocol]:
+            if pool_protocol == valid_pool_proto:
+                return
+        detail = _("The pool protocol '%(pool_protocol)s' is invalid while "
+                   "the listener protocol is '%(listener_protocol)s'.") % {
+            "pool_protocol": pool_protocol,
+            "listener_protocol": listener_protocol}
+        raise exceptions.ValidationException(detail=detail)
diff --git a/octavia/api/v2/controllers/l7policy.py b/octavia/api/v2/controllers/l7policy.py
index a7503ce5c0..2c82e0741d 100644
--- a/octavia/api/v2/controllers/l7policy.py
+++ b/octavia/api/v2/controllers/l7policy.py
@@ -32,7 +32,6 @@ from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
-from octavia.i18n import _
 
 CONF = cfg.CONF
 
@@ -111,23 +110,12 @@ class L7PolicyController(base.BaseController):
             # do not give any information as to what constraint failed
             raise exceptions.InvalidOption(value='', option='')
 
-    def _escape_l7policy_udp_pool_request(self, pool):
-        if pool.protocol == constants.PROTOCOL_UDP:
-            raise exceptions.ValidationException(
-                detail=_("%s protocol pool can not be assigned to "
-                         "l7policy.") % constants.PROTOCOL_UDP)
-
     @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse,
                          body=l7policy_types.L7PolicyRootPOST,
                          status_code=201)
     def post(self, l7policy_):
         """Creates a l7policy on a listener."""
         l7policy = l7policy_.l7policy
         context = pecan.request.context.get('octavia_context')
-        # Make sure any pool specified by redirect_pool_id exists
-        if l7policy.redirect_pool_id:
-            db_pool = self._get_db_pool(
-                context.session, l7policy.redirect_pool_id)
-            self._escape_l7policy_udp_pool_request(db_pool)
         # Verify the parent listener exists
         listener_id = l7policy.listener_id
         listener = self._get_db_listener(
@@ -135,6 +123,11 @@ class L7PolicyController(base.BaseController):
         load_balancer_id = listener.load_balancer_id
         l7policy.project_id, provider = self._get_lb_project_id_provider(
             context.session, load_balancer_id)
+        # Make sure any pool specified by redirect_pool_id exists
+        if l7policy.redirect_pool_id:
+            db_pool = self._get_db_pool(
+                context.session, l7policy.redirect_pool_id)
+            self._validate_protocol(listener.protocol, db_pool.protocol)
 
         self._auth_validate_action(context, l7policy.project_id,
                                    constants.RBAC_POST)
@@ -214,13 +207,16 @@ class L7PolicyController(base.BaseController):
                 l7policy_dict[attr] = l7policy_dict.pop(val)
         sanitized_l7policy = l7policy_types.L7PolicyPUT(**l7policy_dict)
         context = pecan.request.context.get('octavia_context')
+
+        db_l7policy = self._get_db_l7policy(context.session, id,
+                                            show_deleted=False)
+        listener = self._get_db_listener(
+            context.session, db_l7policy.listener_id)
         # Make sure any specified redirect_pool_id exists
         if l7policy_dict.get('redirect_pool_id'):
             db_pool = self._get_db_pool(
                 context.session, l7policy_dict['redirect_pool_id'])
-            self._escape_l7policy_udp_pool_request(db_pool)
-        db_l7policy = self._get_db_l7policy(context.session, id,
-                                            show_deleted=False)
+            self._validate_protocol(listener.protocol, db_pool.protocol)
         load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id(
             db_l7policy)
         project_id, provider = self._get_lb_project_id_provider(
diff --git a/octavia/api/v2/controllers/listener.py b/octavia/api/v2/controllers/listener.py
index 35e7781df5..fec4948e65 100644
--- a/octavia/api/v2/controllers/listener.py
+++ b/octavia/api/v2/controllers/listener.py
@@ -116,11 +116,7 @@ class ListenersController(base.BaseController):
             if not db_pool:
                 raise exceptions.NotFound(
                     resource=data_models.Pool._name(), id=pool_id)
-            if (db_pool.protocol == constants.PROTOCOL_UDP and
-                    db_pool.protocol != listener_protocol):
-                msg = _("Listeners of type %s can only have pools of "
-                        "type UDP.") % constants.PROTOCOL_UDP
-                raise exceptions.ValidationException(detail=msg)
+            self._validate_protocol(listener_protocol, db_pool.protocol)
 
     def _has_tls_container_refs(self, listener_dict):
         return (listener_dict.get('tls_certificate_id') or
diff --git a/octavia/api/v2/controllers/pool.py b/octavia/api/v2/controllers/pool.py
index 62c2deb414..eb9cc2a94f 100644
--- a/octavia/api/v2/controllers/pool.py
+++ b/octavia/api/v2/controllers/pool.py
@@ -204,6 +204,7 @@ class PoolsController(base.BaseController):
         elif pool.listener_id:
             listener = self.repositories.listener.get(
                 context.session, id=pool.listener_id)
+            self._validate_protocol(listener.protocol, pool.protocol)
             pool.loadbalancer_id = listener.load_balancer_id
         pool.project_id, provider = self._get_lb_project_id_provider(
             context.session, pool.loadbalancer_id)
diff --git a/octavia/common/constants.py b/octavia/common/constants.py
index ee02d57af1..1c98116fd1 100644
--- a/octavia/common/constants.py
+++ b/octavia/common/constants.py
@@ -205,6 +205,14 @@ DOMAIN_NAME = 'domain_name'
 UPDATE_STATS = 'UPDATE_STATS'
 UPDATE_HEALTH = 'UPDATE_HEALTH'
 
+VALID_LISTENER_POOL_PROTOCOL_MAP = {
+    PROTOCOL_TCP: [PROTOCOL_HTTP, PROTOCOL_HTTPS,
+                   PROTOCOL_PROXY, PROTOCOL_TCP],
+    PROTOCOL_HTTP: [PROTOCOL_HTTP, PROTOCOL_PROXY],
+    PROTOCOL_HTTPS: [PROTOCOL_HTTPS, PROTOCOL_PROXY, PROTOCOL_TCP],
+    PROTOCOL_TERMINATED_HTTPS: [PROTOCOL_HTTP, PROTOCOL_PROXY],
+    PROTOCOL_UDP: [PROTOCOL_UDP]}
+
 # API Integer Ranges
 MIN_PORT_NUMBER = 1
 MAX_PORT_NUMBER = 65535
diff --git a/octavia/tests/common/constants.py b/octavia/tests/common/constants.py
index aec6986e75..6c3934765c 100644
--- a/octavia/tests/common/constants.py
+++ b/octavia/tests/common/constants.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from octavia.common import constants
+
 
 class MockNovaInterface(object):
     net_id = None
@@ -214,3 +216,22 @@ MOCK_NETWORK_IP_AVAILABILITY = {'network_ip_availability': (
     'total_ips': MOCK_NETWORK_TOTAL_IPS,
     'used_ips': MOCK_NETWORK_USED_IPS,
     'subnet_ip_availability': MOCK_SUBNET_IP_AVAILABILITY})}
+
+INVALID_LISTENER_POOL_PROTOCOL_MAP = {
+    constants.PROTOCOL_HTTP: [constants.PROTOCOL_HTTPS,
+                              constants.PROTOCOL_TCP,
+                              constants.PROTOCOL_TERMINATED_HTTPS,
+                              constants.PROTOCOL_UDP],
+    constants.PROTOCOL_HTTPS: [constants.PROTOCOL_HTTP,
+                               constants.PROTOCOL_TERMINATED_HTTPS,
+                               constants.PROTOCOL_UDP],
+    constants.PROTOCOL_TCP: [constants.PROTOCOL_TERMINATED_HTTPS,
+                             constants.PROTOCOL_UDP],
+    constants.PROTOCOL_TERMINATED_HTTPS: [constants.PROTOCOL_HTTPS,
+                                          constants.PROTOCOL_TCP,
+                                          constants.PROTOCOL_UDP],
+    constants.PROTOCOL_UDP: [constants.PROTOCOL_TCP,
+                             constants.PROTOCOL_HTTP,
+                             constants.PROTOCOL_HTTPS,
+                             constants.PROTOCOL_TERMINATED_HTTPS,
+                             constants.PROTOCOL_PROXY]}
diff --git a/octavia/tests/functional/api/v2/base.py b/octavia/tests/functional/api/v2/base.py
index 199a69706a..920ada69ca 100644
--- a/octavia/tests/functional/api/v2/base.py
+++ b/octavia/tests/functional/api/v2/base.py
@@ -348,6 +348,9 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
         response = self.put(path, body, status=202)
         return response.json
 
+    # NOTE: This method should be used cautiously. On load balancers with a
+    # significant number of child resources, it will update the status of
+    # each and every resource and thus take a lot of DB time.
     def _set_lb_and_children_statuses(self, lb_id, prov_status, op_status,
                                       autodetect=True):
         self.set_object_status(self.lb_repo, lb_id,
@@ -417,6 +420,9 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
                 provisioning_status=hm_prov,
                 operating_status=op_status)
 
+    # NOTE: This method should be used cautiously. On load balancers with a
+    # significant number of child resources, it will update the status of
+    # each and every resource and thus take a lot of DB time.
     def set_lb_status(self, lb_id, status=None):
         explicit_status = True if status is not None else False
         if not explicit_status:
diff --git a/octavia/tests/functional/api/v2/test_l7policy.py b/octavia/tests/functional/api/v2/test_l7policy.py
index d5cd0f6859..49d9193df4 100644
--- a/octavia/tests/functional/api/v2/test_l7policy.py
+++ b/octavia/tests/functional/api/v2/test_l7policy.py
@@ -22,6 +22,7 @@ from octavia.common import constants
 import octavia.common.context
 from octavia.common import data_models
 from octavia.common import exceptions
+from octavia.tests.common import constants as c_const
 
 from octavia.tests.functional.api.v2 import base
 
@@ -770,21 +771,6 @@ class TestL7Policy(base.BaseAPITest):
                     'redirect_pool_id': uuidutils.generate_uuid()}
         self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                   status=404)
 
-    def test_bad_create_redirect_to_udp_pool(self):
-        udp_pool_id = self.create_pool(
-            self.lb_id,
-            constants.PROTOCOL_UDP,
-            constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id')
-        l7policy = {
-            'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
-            'listener_id': self.listener_id,
-            'redirect_pool_id': udp_pool_id}
-        res = self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
-                        status=400, expect_errors=True)
-        expect_error_msg = ("Validation failure: %s protocol pool can not be "
-                            "assigned to l7policy.") % constants.PROTOCOL_UDP
-        self.assertEqual(expect_error_msg, res.json['faultstring'])
-
     def test_bad_create_redirect_to_url(self):
         l7policy = {'listener_id': self.listener_id,
                     'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
@@ -935,27 +921,6 @@ class TestL7Policy(base.BaseAPITest):
                        l7policy_id=api_l7policy.get('id')),
                    self._build_body(new_l7policy), status=400)
 
-    def test_bad_update_redirect_to_udp_pool(self):
-        api_l7policy = self.create_l7policy(self.listener_id,
-                                            constants.L7POLICY_ACTION_REJECT,
-                                            ).get(self.root_tag)
-        self.set_lb_status(self.lb_id)
-        udp_pool_id = self.create_pool(
-            self.lb_id,
-            constants.PROTOCOL_UDP,
-            constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id')
-        self.set_lb_status(self.lb_id)
-        new_l7policy = {
-            'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
-            'redirect_pool_id': udp_pool_id}
-        res = self.put(self.L7POLICY_PATH.format(
-            l7policy_id=api_l7policy.get('id')),
-            self._build_body(new_l7policy),
-            status=400, expect_errors=True)
-        expect_error_msg = ("Validation failure: %s protocol pool can not be "
-                            "assigned to l7policy.") % constants.PROTOCOL_UDP
-        self.assertEqual(expect_error_msg, res.json['faultstring'])
-
     def test_bad_update_redirect_to_url(self):
         api_l7policy = self.create_l7policy(self.listener_id,
                                             constants.L7POLICY_ACTION_REJECT,
@@ -1298,3 +1263,118 @@ class TestL7Policy(base.BaseAPITest):
 
         self.delete(self.L7POLICY_PATH.format(
             l7policy_id=l7policy.get('id')), status=404)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_pool_protocol_map_post(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                opts = {}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+                listener = self.create_listener(
+                    listener_proto, port, self.lb_id, **opts).get('listener')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+
+                l7policy['listener_id'] = listener.get('id')
+                l7policy['redirect_pool_id'] = pool.get('id')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                self.post(self.L7POLICIES_PATH,
+                          self._build_body(l7policy), status=201)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 100
+        for listener_proto in invalid_map:
+            opts = {}
+            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+            listener = self.create_listener(
+                listener_proto, port, self.lb_id, **opts).get('listener')
+            self.set_object_status(self.lb_repo, self.lb_id)
+            port = port + 1
+            for pool_proto in invalid_map[listener_proto]:
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+                l7policy['listener_id'] = listener.get('id')
+                l7policy['redirect_pool_id'] = pool.get('id')
+                expect_error_msg = ("Validation failure: The pool protocol "
+                                    "'%s' is invalid while the listener "
+                                    "protocol is '%s'.") % (pool_proto,
+                                                            listener_proto)
+                res = self.post(self.L7POLICIES_PATH,
+                                self._build_body(l7policy), status=400)
+                self.assertEqual(expect_error_msg, res.json['faultstring'])
+                self.assert_correct_status(lb_id=self.lb_id)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_pool_protocol_map_put(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        new_l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                opts = {}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+                listener = self.create_listener(
+                    listener_proto, port, self.lb_id, **opts).get('listener')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                l7policy = self.create_l7policy(
+                    listener.get('id'),
+                    constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+                self.set_object_status(self.lb_repo, self.lb_id)
+                new_l7policy['redirect_pool_id'] = pool.get('id')
+
+                self.put(
+                    self.L7POLICY_PATH.format(l7policy_id=l7policy.get('id')),
+                    self._build_body(new_l7policy), status=200)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 100
+        for listener_proto in invalid_map:
+            opts = {}
+            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+            listener = self.create_listener(
+                listener_proto, port, self.lb_id, **opts).get('listener')
+            self.set_object_status(self.lb_repo, self.lb_id)
+            port = port + 1
+            for pool_proto in invalid_map[listener_proto]:
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                l7policy = self.create_l7policy(
+                    listener.get('id'),
+                    constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+                self.set_object_status(self.lb_repo, self.lb_id)
+                new_l7policy['redirect_pool_id'] = pool.get('id')
+                expect_error_msg = ("Validation failure: The pool protocol "
+                                    "'%s' is invalid while the listener "
+                                    "protocol is '%s'.") % (pool_proto,
+                                                            listener_proto)
+                res = self.put(self.L7POLICY_PATH.format(
+                    l7policy_id=l7policy.get('id')),
+                    self._build_body(new_l7policy), status=400)
+                self.assertEqual(expect_error_msg, res.json['faultstring'])
+                self.assert_correct_status(lb_id=self.lb_id)
diff --git a/octavia/tests/functional/api/v2/test_listener.py b/octavia/tests/functional/api/v2/test_listener.py
index 7d1a39940f..aff9fb7e37 100644
--- a/octavia/tests/functional/api/v2/test_listener.py
+++ b/octavia/tests/functional/api/v2/test_listener.py
@@ -26,6 +26,7 @@ import octavia.common.context
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import api as db_api
+from octavia.tests.common import constants as c_const
 from octavia.tests.common import sample_certs
 from octavia.tests.functional.api.v2 import base
 
@@ -695,9 +696,10 @@ class TestListener(base.BaseAPITest):
                        'protocol': constants.PROTOCOL_HTTP,
                        'protocol_port': 80,
                        'loadbalancer_id': self.lb_id}
-        expect_error_msg = ("Validation failure: Listeners of type %s can "
-                            "only have pools of "
-                            "type UDP.") % constants.PROTOCOL_UDP
+        expect_error_msg = ("Validation failure: The pool protocol '%s' is "
+                            "invalid while the listener protocol is '%s'.") % (
+            constants.PROTOCOL_UDP,
+            lb_listener['protocol'])
         res = self.post(self.LISTENERS_PATH, self._build_body(lb_listener),
                         status=400, expect_errors=True)
         self.assertEqual(expect_error_msg, res.json['faultstring'])
@@ -2489,3 +2491,101 @@ class TestListener(base.BaseAPITest):
         self.set_lb_status(lb['id'], status=constants.DELETED)
         self.get(self.LISTENER_PATH.format(
             listener_id=li.get('id') + "/stats"), status=404)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_pool_protocol_map_post(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                listener = {'protocol': listener_proto,
+                            'protocol_port': port,
+                            'loadbalancer_id': self.lb_id,
+                            'default_pool_id': pool.get('id')}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    listener.update(
+                        {'sni_container_refs': [uuidutils.generate_uuid()]})
+                body = self._build_body(listener)
+                self.post(self.LISTENERS_PATH, body, status=201)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        for listener_proto in invalid_map:
+            for pool_proto in invalid_map[listener_proto]:
+                port = port + 1
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                expect_error_msg = ("Validation failure: The pool protocol "
+                                    "'%s' is invalid while the listener "
+                                    "protocol is '%s'.") % (pool_proto,
+                                                            listener_proto)
+                listener = {'protocol': listener_proto,
+                            'protocol_port': port,
+                            'loadbalancer_id': self.lb_id,
+                            'default_pool_id': pool.get('id')}
+                body = self._build_body(listener)
+                res = self.post(self.LISTENERS_PATH, body,
+                                status=400, expect_errors=True)
+                self.assertEqual(expect_error_msg, res.json['faultstring'])
+                self.assert_correct_status(lb_id=self.lb_id)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_pool_protocol_map_put(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                opts = {}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+                listener = self.create_listener(
+                    listener_proto, port, self.lb_id, **opts).get('listener')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                new_listener = {'default_pool_id': pool.get('id')}
+                res = self.put(
+                    self.LISTENER_PATH.format(listener_id=listener.get('id')),
+                    self._build_body(new_listener), status=200)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 100
+        for listener_proto in invalid_map:
+            opts = {}
+            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+            listener = self.create_listener(
+                listener_proto, port, self.lb_id, **opts).get('listener')
+            self.set_object_status(self.lb_repo, self.lb_id)
+            port = port + 1
+            for pool_proto in invalid_map[listener_proto]:
+                expect_error_msg = ("Validation failure: The pool protocol "
+                                    "'%s' is invalid while the listener "
+                                    "protocol is '%s'.") % (pool_proto,
+                                                            listener_proto)
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                new_listener = {'default_pool_id': pool.get('id')}
+                res = self.put(
+                    self.LISTENER_PATH.format(listener_id=listener.get('id')),
+                    self._build_body(new_listener), status=400)
+                self.assertEqual(expect_error_msg, res.json['faultstring'])
+                self.assert_correct_status(lb_id=self.lb_id)
diff --git a/octavia/tests/functional/api/v2/test_load_balancer.py b/octavia/tests/functional/api/v2/test_load_balancer.py
index 8aa14e1e7e..65cbd94911 100644
--- a/octavia/tests/functional/api/v2/test_load_balancer.py
+++ b/octavia/tests/functional/api/v2/test_load_balancer.py
@@ -3075,7 +3075,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
             expected_members=[expected_member],
             create_hm=create_hm,
             expected_hm=expected_hm,
-            protocol=constants.PROTOCOL_TCP)
+            protocol=constants.PROTOCOL_HTTP)
         create_sni_containers, expected_sni_containers = (
             self._get_sni_container_bodies())
         create_l7rules, expected_l7rules = self._get_l7rules_bodies()
diff --git a/octavia/tests/functional/api/v2/test_pool.py b/octavia/tests/functional/api/v2/test_pool.py
index 43e5cec4ab..d17bcb2ffa 100644
--- a/octavia/tests/functional/api/v2/test_pool.py
+++ b/octavia/tests/functional/api/v2/test_pool.py
@@ -23,6 +23,7 @@ import octavia.common.context
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.db import api as db_api
+from octavia.tests.common import constants as c_const
 from octavia.tests.common import sample_certs
 from octavia.tests.functional.api.v2 import base
 
@@ -2327,3 +2328,58 @@ class TestPool(base.BaseAPITest):
         self.set_lb_status(self.lb_id, status=constants.DELETED)
         self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                     status=404)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_valid_listener_pool_protocol(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        lb_pool = {
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
+            'project_id': self.project_id}
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                opts = {}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+                listener = self.create_listener(
+                    listener_proto, port, self.lb_id, **opts).get('listener')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                if listener['default_pool_id'] is None:
+                    lb_pool['protocol'] = pool_proto
+                    lb_pool['listener_id'] = listener.get('id')
+                    self.post(self.POOLS_PATH, self._build_body(lb_pool),
+                              status=201)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_invalid_listener_pool_protocol_map(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        lb_pool = {
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
+            'project_id': self.project_id}
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        for listener_proto in invalid_map:
+            opts = {}
+            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+            listener = self.create_listener(
+                listener_proto, port, self.lb_id, **opts).get('listener')
+            self.set_object_status(self.lb_repo, self.lb_id)
+            port = port + 1
+            for pool_proto in invalid_map[listener_proto]:
+                expect_error_msg = ("Validation failure: The pool protocol "
+                                    "'%s' is invalid while the listener "
+                                    "protocol is '%s'.") % (pool_proto,
+                                                            listener_proto)
+                if listener['default_pool_id'] is None:
+                    lb_pool['protocol'] = pool_proto
+                    lb_pool['listener_id'] = listener.get('id')
+                    res = self.post(self.POOLS_PATH,
+                                    self._build_body(lb_pool),
+                                    status=400, expect_errors=True)
+                    self.assertEqual(expect_error_msg,
+                                     res.json['faultstring'])
+                    self.assert_correct_status(lb_id=self.lb_id)
diff --git a/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml b/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml
new file mode 100644
index 0000000000..badd114016
--- /dev/null
+++ b/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Added validation of listener and pool protocol combinations. A pool can
+    now only be associated with a listener if their protocols are compatible.
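
For reviewers, a minimal standalone sketch of the behavior the new
``_validate_protocol`` helper enforces (an illustration only, not part of the
patch: ``ValueError`` stands in for
``octavia.common.exceptions.ValidationException``, and the mapping inlines the
plain-string values of the ``PROTOCOL_*`` constants)::

    VALID_LISTENER_POOL_PROTOCOL_MAP = {
        'TCP': ['HTTP', 'HTTPS', 'PROXY', 'TCP'],
        'HTTP': ['HTTP', 'PROXY'],
        'HTTPS': ['HTTPS', 'PROXY', 'TCP'],
        'TERMINATED_HTTPS': ['HTTP', 'PROXY'],
        'UDP': ['UDP'],
    }

    def validate_protocol(listener_protocol, pool_protocol):
        # Mirrors BaseController._validate_protocol: return silently on a
        # valid combination, raise with the same message text otherwise.
        if pool_protocol in VALID_LISTENER_POOL_PROTOCOL_MAP.get(
                listener_protocol, []):
            return
        raise ValueError(
            "The pool protocol '%s' is invalid while the listener "
            "protocol is '%s'." % (pool_protocol, listener_protocol))

    validate_protocol('TERMINATED_HTTPS', 'HTTP')  # valid, returns silently
    try:
        validate_protocol('HTTP', 'UDP')
    except ValueError as exc:
        # In the API this surfaces as a 400 response whose faultstring is
        # "Validation failure: <this message>".
        print(exc)

This is the check that now runs on listener create and update (for
``default_pool_id``), pool create (for ``listener_id``), and l7policy create
and update (for ``redirect_pool_id``).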