Shared pools support

This patch introduces shared pools functionality to
Octavia. With this patch, listeners and pools can have
an N:M relationship instead of a simple 1:1 relationship,
although they must still be associated with the same
loadbalancer object.

This patch includes a schema change to the database: pools
are now associated directly with loadbalancers instead of
listeners. The migration in this patch includes an ETL step
which populates this new field in the pool table correctly.
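
As a rough sketch of what such an ETL step does (this is not the
actual migration in this patch; table and column names are
assumptions based on the models touched elsewhere in this diff),
the new association can be back-filled from the existing listener
rows:

    # Hypothetical back-fill of the new pool -> loadbalancer link.
    # Table/column names are assumptions for illustration only.
    from alembic import op


    def upgrade():
        op.execute(
            "UPDATE pool SET load_balancer_id = "
            "(SELECT listener.load_balancer_id FROM listener "
            "WHERE listener.default_pool_id = pool.id)")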

Extensive API changes were necessary to facilitate this
change. However, all the changes to the API should be
backward compatible.
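
For example (a sketch with placeholder UUIDs), clients that list
pools through the old listener-scoped URL keep working, while new
clients can use the loadbalancer-scoped URL with the optional
listener_id filter documented below:

    # Sketch: both URL forms should return pools after this patch.
    import requests

    BASE = 'http://octavia-api:9876/v1'  # placeholder endpoint
    lb_id, listener_id = 'LB_UUID', 'LISTENER_UUID'  # placeholders

    # New loadbalancer-scoped endpoint, optionally filtered:
    new_style = requests.get(
        '%s/loadbalancers/%s/pools?listener_id=%s'
        % (BASE, lb_id, listener_id))

    # Deprecated but still supported listener-scoped endpoint:
    old_style = requests.get(
        '%s/loadbalancers/%s/listeners/%s/pools'
        % (BASE, lb_id, listener_id))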

This patch is a necessary precursor to adding L7 switching
functionality to Octavia.

Partially-Implements: blueprint lbaas-l7-rules
Partially-Implements: blueprint layer-7-switching
Change-Id: I797c718412e756be067dd4c304c989a4d43bb8ef
Stephen Balukoff 2015-12-11 03:27:05 -08:00
parent 3a49fe2dcf
commit c3e97b7630
42 changed files with 1368 additions and 736 deletions

View File

@ -317,7 +317,10 @@ Listeners
| default_tls\ | String | Barbican ``UUID`` for TLS container |
| _container_id | | |
+---------------------+------------+-------------------------------------+
| project_id | String | ``UUID`` for project |
| default_pool_id | UUID | ``UUID`` of the pool to which \ |
| | | requests will be routed by default |
+---------------------+------------+-------------------------------------+
| project_id | String | ``UUID`` for project |
+---------------------+------------+-------------------------------------+
| name | String | String detailing the name of the \ |
| | | listener |
@ -362,7 +365,8 @@ Retrieve a list of listeners.
'protocol_port': 80,
'id': 'uuid',
'operating_status': 'ONLINE',
'name': 'listener_name'
'name': 'listener_name',
'default_pool_id': 'uuid'
}
@ -394,7 +398,8 @@ Retrieve details of a listener.
'protocol_port': 80,
'id': 'uuid',
'operating_status': 'ONLINE',
'name': 'listener_name'
'name': 'listener_name',
'default_pool_id': 'uuid'
}
List Listener Statistics
@ -405,7 +410,7 @@ Retrieve the stats of a listener.
+----------------+-----------------------------------------------------------+
| Request Type | ``GET`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}``\ |
| | ``/stats`` |
+----------------+---------+-------------------------------------------------+
| | Success | 200 |
@ -457,6 +462,8 @@ Create a listener.
+------------------+----------+
| description | no |
+------------------+----------+
| default_pool_id | no |
+------------------+----------+
| enabled | no |
+------------------+----------+
@ -469,6 +476,7 @@ Create a listener.
'default_tls_container_id': 'uuid',
'name': 'listener_name',
'description': 'listener_description',
'default_pool_id': 'uuid',
'enabled': true
}
@ -485,7 +493,8 @@ Create a listener.
'protocol_port': 88,
'id': 'uuid',
'operating_status': 'OFFLINE',
'name': 'listener_name'
'name': 'listener_name',
'default_pool_id': 'uuid'
}
Update Listener
@ -521,6 +530,8 @@ Modify mutable fields of a listener.
+------------------+----------+
| description | no |
+------------------+----------+
| default_pool_id | no |
+------------------+----------+
| enabled | no |
+------------------+----------+
@ -533,6 +544,7 @@ Modify mutable fields of a listener.
'default_tls_container_id': 'uuid',
'name': 'listener_name',
'description': 'listener_description',
'default_pool_id': 'uuid',
'enabled': true
}
@ -549,7 +561,8 @@ Modify mutable fields of a listener.
'protocol_port': 88,
'id': 'uuid',
'operating_status': 'ONLINE',
'name': 'listener_name'
'name': 'listener_name',
'default_pool_id': 'uuid'
}
Delete Listener
@ -618,13 +631,19 @@ Pools
List Pools
**********
Retrieve a list of pools.
Retrieve a list of pools on a loadbalancer. This API endpoint
will list all pools on a loadbalancer or, optionally, all the active pools
on a listener, depending on whether the ``listener_id`` query string
parameter is supplied.
+----------------+-----------------------------------------------------------+
| Request Type | ``GET`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| | ``/pools`` |
| Endpoints | ``URL/v1/loadbalancers/{lb_id}/pools``\ |
| | ``[?listener_id={listener_id}]`` |
| | |
| | **DEPRECATED** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools`` |
+----------------+---------+-------------------------------------------------+
| | Success | 200 |
| Response Codes +---------+-------------------------------------------------+
@ -657,8 +676,10 @@ Retrieve details of a pool.
+----------------+-----------------------------------------------------------+
| Request Type | ``GET`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| | ``/pools/{pool_id}`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/pools/{pool_id}`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}`` |
+----------------+---------+-------------------------------------------------+
| | Success | 200 |
| Response Codes +---------+-------------------------------------------------+
@ -689,8 +710,10 @@ Create a pool.
+----------------+-----------------------------------------------------------+
| Request Type | ``POST`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| | ``/pools`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/pools`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -754,8 +777,10 @@ Modify mutable attributes of a pool.
+----------------+-----------------------------------------------------------+
| Request Type | ``PUT`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| | ``/pools/{pool_id}`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/pools/{pool_id}`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -818,8 +843,10 @@ Delete a pool.
+----------------+-----------------------------------------------------------+
| Request Type | ``DELETE`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| | ``/pools/{pool_id}`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/pools/{pool_id}`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -874,8 +901,12 @@ Retrieve details of a health monitor.
+----------------+-----------------------------------------------------------+
| Request Type | ``GET`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/health_monitor`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/health_monitor`` |
+----------------+---------+-------------------------------------------------+
| | Success | 200 |
| Response Codes +---------+-------------------------------------------------+
@ -904,8 +935,12 @@ Create a health monitor.
+----------------+-----------------------------------------------------------+
| Request Type | ``POST`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/health_monitor`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/health_monitor`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -972,8 +1007,12 @@ Modify mutable attributes of a health monitor.
+----------------+-----------------------------------------------------------+
| Request Type | ``PUT`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/health_monitor`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/health_monitor`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -1040,8 +1079,12 @@ Delete a health monitor.
+----------------+-----------------------------------------------------------+
| Request Type | ``DELETE`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/health_monitor`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/health_monitor`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -1081,8 +1124,12 @@ Retrieve a list of pool members.
+----------------+-----------------------------------------------------------+
| Request Type | ``GET`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/members`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/members`` |
+----------------+---------+-------------------------------------------------+
| | Success | 200 |
| Response Codes +---------+-------------------------------------------------+
@ -1112,8 +1159,12 @@ Retrieve details of a pool member.
+----------------+-----------------------------------------------------------+
| Request Type | ``GET`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/members/{member_id}`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/members/{member_id}`` |
+----------------+---------+-------------------------------------------------+
| | Success | 200 |
| Response Codes +---------+-------------------------------------------------+
@ -1140,8 +1191,12 @@ Create a pool member.
+----------------+-----------------------------------------------------------+
| Request Type | ``POST`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| | ``/pools/{pool_id}/health_monitor`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/members`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/members`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -1194,8 +1249,12 @@ Modify mutable attributes of a pool member.
+----------------+-----------------------------------------------------------+
| Request Type | ``PUT`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/members/{member_id}`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/members/{member_id}`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+
@ -1242,8 +1301,12 @@ Delete a pool member.
+----------------+-----------------------------------------------------------+
| Request Type | ``DELETE`` |
+----------------+-----------------------------------------------------------+
| Endpoint | ``URL/v1/loadbalancers/{lb_id}/listeners/{listener_id}\`` |
| Endpoint | ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/pools/{pool_id}/members/{member_id}`` |
| | |
| | **DEPRECATED:** ``URL/v1/loadbalancers/{lb_id}``\ |
| | ``/listeners/{listener_id}/pools/{pool_id}``\ |
| | ``/members/{member_id}`` |
+----------------+---------+-------------------------------------------------+
| | Success | 202 |
| Response Codes +---------+-------------------------------------------------+

View File

@ -180,6 +180,7 @@ class JinjaTemplater(object):
if listener.default_pool:
ret_value['default_pool'] = self._transform_pool(
listener.default_pool)
# TODO(sbalukoff): Handle pools referenced by L7Policies
return ret_value
def _transform_pool(self, pool):

View File

@ -29,4 +29,7 @@
{% if loadbalancer.listener.default_pool %}
{{ backend_macro(constants, loadbalancer.listener, loadbalancer.listener.default_pool) }}
{% endif %}
{% endblock proxies %}
{# TODO(sbalukoff): Will need to add pools referenced by L7Policies attached
# to the listener, but ensure each only gets listed once, eh.
#}
{% endblock proxies %}

View File

@ -51,8 +51,8 @@ frontend {{ listener.id }}
{% if listener.connection_limit is defined %}
maxconn {{ listener.connection_limit }}
{% endif %}
{% if listener.protocol_mode == constants.PROTOCOL_HTTP.lower() %}
option forwardfor
{% if listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower() %}
redirect scheme https if !{ ssl_fc }
{% endif %}
{{ bind_macro(constants, listener, lb_vip_address)|trim() }}
mode {{ listener.protocol_mode }}
@ -65,9 +65,6 @@ frontend {{ listener.id }}
backend {{ pool.id }}
mode {{ pool.protocol }}
balance {{ pool.lb_algorithm }}
{% if listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS %}
redirect scheme https if !{ ssl_fc }
{% endif %}
{% if pool.session_persistence %}
{% if pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP %}
{% if listener.topology == constants.TOPOLOGY_ACTIVE_STANDBY %}
@ -98,7 +95,7 @@ backend {{ pool.id }}
option ssl-hello-chk
{% endif %}
{% endif %}
{% if listener.protocol_mode == constants.PROTOCOL_HTTP.lower() %}
{% if pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() %}
option forwardfor
{% endif %}
{% for member in pool.members %}

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -32,28 +33,54 @@ LOG = logging.getLogger(__name__)
class HealthMonitorController(base.BaseController):
def __init__(self, load_balancer_id, listener_id, pool_id):
def __init__(self, load_balancer_id, pool_id, listener_id=None):
super(HealthMonitorController, self).__init__()
self.load_balancer_id = load_balancer_id
self.listener_id = listener_id
self.pool_id = pool_id
self.handler = self.handler.health_monitor
def _get_db_hm(self, session):
"""Gets the current health monitor object from the database."""
db_hm = self.repositories.health_monitor.get(
session, pool_id=self.pool_id)
if not db_hm:
LOG.info(_LI("Health Monitor for Pool %s was not found"),
self.pool_id)
raise exceptions.NotFound(
resource=data_models.HealthMonitor._name(),
id=self.pool_id)
return db_hm
@wsme_pecan.wsexpose(hm_types.HealthMonitorResponse)
def get_all(self):
"""Gets a single health monitor's details."""
# NOTE(blogan): since a pool can only have one health monitor
# we are using the get_all method to only get the single health monitor
context = pecan.request.context.get('octavia_context')
db_hm = self.repositories.health_monitor.get(
context.session, pool_id=self.pool_id)
if not db_hm:
LOG.info(_LI("Health Monitor for Pool %s was not found"),
self.pool_id)
raise exceptions.NotFound(
resource=data_models.HealthMonitor._name(), id=id)
db_hm = self._get_db_hm(context.session)
return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
def _test_lb_and_listener_statuses(self, session, hm=None):
"""Verify load balancer is in a mutable state."""
# We need to verify that any listeners referencing this pool are also
# mutable
listener_ids = []
if hm:
listener_ids = [l.id for l in hm.pool.listeners]
if self.listener_id and self.listener_id not in listener_ids:
listener_ids.append(self.listener_id)
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
session, self.load_balancer_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
listener_ids=listener_ids):
LOG.info(_LI("Health Monitor cannot be created or modified "
"because the Load Balancer is in an immutable state"))
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
@wsme_pecan.wsexpose(hm_types.HealthMonitorResponse,
body=hm_types.HealthMonitorPOST, status_code=202)
def post(self, health_monitor):
@ -68,31 +95,21 @@ class HealthMonitorController(base.BaseController):
pass
hm_dict = health_monitor.to_dict()
hm_dict['pool_id'] = self.pool_id
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all it's listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Health Monitor for Pool %s cannot be updated "
"because the Load Balancer is immutable."),
self.pool_id)
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
self._test_lb_and_listener_statuses(context.session)
try:
db_hm = self.repositories.health_monitor.create(
context.session, **hm_dict)
db_hm = self.repositories.health_monitor.create(context.session,
**hm_dict)
except odb_exceptions.DBError:
# Setting LB and Listener back to active because this is just a
# validation failure
self.repositories.load_balancer.update(
context.session, self.load_balancer_id,
provisioning_status=constants.ACTIVE)
self.repositories.listener.update(
context.session, self.listener_id,
provisioning_status=constants.ACTIVE)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
provisioning_status=constants.ACTIVE)
raise exceptions.InvalidOption(value=hm_dict.get('type'),
option='type')
try:
@ -101,11 +118,11 @@ class HealthMonitorController(base.BaseController):
self.handler.create(db_hm)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_hm = self.repositories.health_monitor.get(
context.session, pool_id=self.pool_id)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_hm = self._get_db_hm(context.session)
return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
@wsme_pecan.wsexpose(hm_types.HealthMonitorResponse,
@ -118,72 +135,39 @@ class HealthMonitorController(base.BaseController):
id.
"""
context = pecan.request.context.get('octavia_context')
db_hm = self.repositories.health_monitor.get(
context.session, pool_id=self.pool_id)
if not db_hm:
LOG.info(_LI("Health Monitor for Pool %s was not found"),
self.pool_id)
raise exceptions.NotFound(
resource=data_models.HealthMonitor._name(), id=id)
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all it's listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Health Monitor for Pool %s cannot be updated "
"because the Load Balancer is immutable."),
self.pool_id)
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
db_hm = self._get_db_hm(context.session)
self._test_lb_and_listener_statuses(context.session, hm=db_hm)
try:
LOG.info(_LI("Sending Update of Health Monitor for Pool %s to "
"handler"), self.pool_id)
self.handler.update(db_hm, health_monitor)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_hm = self.repositories.health_monitor.get(
context.session, pool_id=self.pool_id)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_hm = self._get_db_hm(context.session)
return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
@wsme_pecan.wsexpose(None, status_code=202)
def delete(self):
"""Deletes a health monitor."""
context = pecan.request.context.get('octavia_context')
db_hm = self.repositories.health_monitor.get(
context.session, pool_id=self.pool_id)
if not db_hm:
LOG.info(_LI("Health Monitor for Pool %s cannot be updated "
"because the Load Balancer is immutable."),
self.pool_id)
raise exceptions.NotFound(
resource=data_models.HealthMonitor._name(), id=id)
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all it's listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
db_hm = self.repositories.health_monitor.get(context.session,
pool_id=self.pool_id)
db_hm = self._get_db_hm(context.session)
self._test_lb_and_listener_statuses(context.session, hm=db_hm)
try:
LOG.info(_LI("Sending Deletion of Health Monitor for Pool %s to "
"handler"), self.pool_id)
self.handler.delete(db_hm)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_hm = self.repositories.health_monitor.get(
context.session, pool_id=self.pool_id)
return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -26,7 +27,6 @@ from octavia.api.v1.types import listener as listener_types
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import api as db_api
from octavia.i18n import _LI
@ -46,16 +46,21 @@ class ListenersController(base.BaseController):
# available
listener.tls_termination = wtypes.Unset
@wsme_pecan.wsexpose(listener_types.ListenerResponse, wtypes.text)
def get_one(self, id):
"""Gets a single listener's details."""
context = pecan.request.context.get('octavia_context')
def _get_db_listener(self, session, id):
"""Gets a listener object from the database."""
db_listener = self.repositories.listener.get(
context.session, load_balancer_id=self.load_balancer_id, id=id)
session, load_balancer_id=self.load_balancer_id, id=id)
if not db_listener:
LOG.info(_LI("Listener %s not found."), id)
raise exceptions.NotFound(
resource=data_models.Listener._name(), id=id)
return db_listener
@wsme_pecan.wsexpose(listener_types.ListenerResponse, wtypes.text)
def get_one(self, id):
"""Gets a single listener's details."""
context = pecan.request.context.get('octavia_context')
db_listener = self._get_db_listener(context.session, id)
return self._convert_db_to_type(db_listener,
listener_types.ListenerResponse)
@ -68,36 +73,56 @@ class ListenersController(base.BaseController):
return self._convert_db_to_type(db_listeners,
[listener_types.ListenerResponse])
def _test_lb_status_post(self, context, lb_repo):
"""Verify load balancer is in a mutable status for post method."""
if not lb_repo.test_and_set_provisioning_status(
context.session, self.load_balancer_id,
constants.PENDING_UPDATE):
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
def _test_lb_and_listener_statuses(
self, session, id=None, listener_status=constants.PENDING_UPDATE):
"""Verify load balancer is in a mutable state."""
lb_repo = self.repositories.load_balancer
if id:
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
session, self.load_balancer_id, constants.PENDING_UPDATE,
listener_status, listener_ids=[id]):
LOG.info(_LI("Load Balancer %s is immutable."),
self.load_balancer_id)
db_lb = lb_repo.get(session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
else:
if not lb_repo.test_and_set_provisioning_status(
session, self.load_balancer_id, constants.PENDING_UPDATE):
db_lb = lb_repo.get(session, id=self.load_balancer_id)
LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
def _validate_listeners(self, context, lb_repo, listener_dict):
"""Validate listeners for wrong protocol or duplicate listeners
def _validate_pool(self, session, pool_id):
"""Validate pool given exists on same load balancer as listener."""
db_pool = self.repositories.pool.get(
session, load_balancer_id=self.load_balancer_id, id=pool_id)
if not db_pool:
raise exceptions.NotFound(
resource=data_models.Pool._name(), id=pool_id)
def _validate_listener(self, session, listener_dict):
"""Validate listener for wrong protocol or duplicate listeners
Update the load balancer db when provisioning status changes.
"""
lb_repo = self.repositories.load_balancer
try:
sni_container_ids = listener_dict.pop('sni_containers')
db_listener = self.repositories.listener.create(
context.session, **listener_dict)
session, **listener_dict)
if sni_container_ids is not None:
for container_id in sni_container_ids:
sni_dict = {'listener_id': db_listener.id,
'tls_container_id': container_id}
self.repositories.sni.create(context.session, **sni_dict)
db_listener = self.repositories.listener.get(context.session,
self.repositories.sni.create(session, **sni_dict)
db_listener = self.repositories.listener.get(session,
id=db_listener.id)
except odb_exceptions.DBDuplicateEntry as de:
# Setting LB back to active because this is just a validation
# failure
lb_repo.update(context.session, self.load_balancer_id,
lb_repo.update(session, self.load_balancer_id,
provisioning_status=constants.ACTIVE)
if ['id'] == de.columns:
raise exceptions.IDAlreadyExists()
@ -107,7 +132,7 @@ class ListenersController(base.BaseController):
except odb_exceptions.DBError:
# Setting LB back to active because this is just a validation
# failure
lb_repo.update(context.session, self.load_balancer_id,
lb_repo.update(session, self.load_balancer_id,
provisioning_status=constants.ACTIVE)
raise exceptions.InvalidOption(value=listener_dict.get('protocol'),
option='protocol')
@ -118,10 +143,9 @@ class ListenersController(base.BaseController):
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, db_listener.id,
session, db_listener.id,
provisioning_status=constants.ERROR)
db_listener = self.repositories.listener.get(
context.session, id=db_listener.id)
db_listener = self._get_db_listener(session, db_listener.id)
return self._convert_db_to_type(db_listener,
listener_types.ListenerResponse)
@ -129,33 +153,23 @@ class ListenersController(base.BaseController):
body=listener_types.ListenerPOST, status_code=202)
def post(self, listener):
"""Creates a listener on a load balancer."""
context = pecan.request.context.get('octavia_context')
self._secure_data(listener)
lb_repo = self.repositories.load_balancer
self._test_lb_status_post(context, lb_repo)
context = pecan.request.context.get('octavia_context')
listener_dict = listener.to_dict()
listener_dict['load_balancer_id'] = self.load_balancer_id
listener_dict['provisioning_status'] = constants.PENDING_CREATE
listener_dict['operating_status'] = constants.OFFLINE
if listener_dict['default_pool_id']:
self._validate_pool(context.session,
listener_dict['default_pool_id'])
self._test_lb_and_listener_statuses(context.session)
# NOTE(blogan): Throwing away because we should not store secure data
# in the database nor should we send it to a handler.
if 'tls_termination' in listener_dict:
del listener_dict['tls_termination']
# This is the extra validation layer for wrong protocol or duplicate
# listeners on the same load balancer.
return self._validate_listeners(context, lb_repo, listener_dict)
def _test_lb_status_put(self, context, id):
"""Test load balancer status for put method."""
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Load Balancer %s is immutable."),
self.load_balancer_id)
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
return self._validate_listener(context.session, listener_dict)
@wsme_pecan.wsexpose(listener_types.ListenerResponse, wtypes.text,
body=listener_types.ListenerPUT, status_code=202)
@ -163,15 +177,13 @@ class ListenersController(base.BaseController):
"""Updates a listener on a load balancer."""
self._secure_data(listener)
context = pecan.request.context.get('octavia_context')
db_listener = self.repositories.listener.get(context.session, id=id)
if not db_listener:
LOG.info(_LI("Listener %s not found."), id)
raise exceptions.NotFound(
resource=data_models.Listener._name(), id=id)
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all it's listeners as ACTIVE.
self._test_lb_status_put(context, id)
db_listener = self._get_db_listener(context.session, id)
listener_dict = listener.to_dict()
if listener_dict['default_pool_id']:
self._validate_pool(context.session,
listener_dict['default_pool_id'])
self._test_lb_and_listener_statuses(context.session, id=id)
try:
LOG.info(_LI("Sending Update of Listener %s to handler"), id)
self.handler.update(db_listener, listener)
@ -179,7 +191,7 @@ class ListenersController(base.BaseController):
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, id, provisioning_status=constants.ERROR)
db_listener = self.repositories.listener.get(context.session, id=id)
db_listener = self._get_db_listener(context.session, id)
return self._convert_db_to_type(db_listener,
listener_types.ListenerResponse)
@ -187,22 +199,10 @@ class ListenersController(base.BaseController):
def delete(self, id):
"""Deletes a listener from a load balancer."""
context = pecan.request.context.get('octavia_context')
db_listener = self.repositories.listener.get(context.session, id=id)
if not db_listener:
LOG.info(_LI("Listener %s not found."), id)
raise exceptions.NotFound(
resource=data_models.Listener._name(), id=id)
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all it's listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, id,
constants.PENDING_UPDATE, constants.PENDING_DELETE):
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
db_listener = self.repositories.listener.get(context.session, id=id)
db_listener = self._get_db_listener(context.session, id)
self._test_lb_and_listener_statuses(
context.session, id=id, listener_status=constants.PENDING_DELETE)
try:
LOG.info(_LI("Sending Deletion of Listener %s to handler"),
db_listener.id)
@ -224,11 +224,11 @@ class ListenersController(base.BaseController):
Verifies that the listener passed in the url exists, and if so decides
which controller, if any, should control be passed.
"""
session = db_api.get_session()
context = pecan.request.context.get('octavia_context')
if listener_id and len(remainder) and remainder[0] == 'pools':
remainder = remainder[1:]
db_listener = self.repositories.listener.get(
session, id=listener_id)
context.session, id=listener_id)
if not db_listener:
LOG.info(_LI("Listener %s not found."), listener_id)
raise exceptions.NotFound(

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -21,11 +22,11 @@ from wsmeext import pecan as wsme_pecan
from octavia.api.v1.controllers import base
from octavia.api.v1.controllers import listener
from octavia.api.v1.controllers import pool
from octavia.api.v1.types import load_balancer as lb_types
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import api as db_api
from octavia.i18n import _LI
@ -38,16 +39,20 @@ class LoadBalancersController(base.BaseController):
super(LoadBalancersController, self).__init__()
self.handler = self.handler.load_balancer
def _get_db_lb(self, session, id):
"""Gets a load_balancer object from the database."""
db_lb = self.repositories.load_balancer.get(session, id=id)
if not db_lb:
LOG.info(_LI("Load Balancer %s was not found."), id)
raise exceptions.NotFound(
resource=data_models.LoadBalancer._name(), id=id)
return db_lb
@wsme_pecan.wsexpose(lb_types.LoadBalancerResponse, wtypes.text)
def get_one(self, id):
"""Gets a single load balancer's details."""
context = pecan.request.context.get('octavia_context')
load_balancer = self.repositories.load_balancer.get(
context.session, id=id)
if not load_balancer:
LOG.info(_LI("Load Balancer %s was not found."), id)
raise exceptions.NotFound(
resource=data_models.LoadBalancer._name(), id=id)
load_balancer = self._get_db_lb(context.session, id)
return self._convert_db_to_type(load_balancer,
lb_types.LoadBalancerResponse)
@ -65,6 +70,16 @@ class LoadBalancersController(base.BaseController):
return self._convert_db_to_type(load_balancers,
[lb_types.LoadBalancerResponse])
def _test_lb_status(self, session, id, lb_status=constants.PENDING_UPDATE):
"""Verify load balancer is in a mutable state."""
lb_repo = self.repositories.load_balancer
if not lb_repo.test_and_set_provisioning_status(
session, id, lb_status):
LOG.info(_LI("Load Balancer %s is immutable."), id)
db_lb = lb_repo.get(session, id=id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=id)
@wsme_pecan.wsexpose(lb_types.LoadBalancerResponse,
body=lb_types.LoadBalancerPOST, status_code=202)
def post(self, load_balancer):
@ -98,19 +113,9 @@ class LoadBalancersController(base.BaseController):
def put(self, id, load_balancer):
"""Updates a load balancer."""
context = pecan.request.context.get('octavia_context')
# Purely to make lines smaller length
lb_repo = self.repositories.load_balancer
db_lb = self.repositories.load_balancer.get(context.session, id=id)
if not db_lb:
LOG.info(_LI("Load Balancer %s was not found."), id)
raise exceptions.NotFound(
resource=data_models.LoadBalancer._name(), id=id)
# Check load balancer is in a mutable status
if not lb_repo.test_and_set_provisioning_status(
context.session, id, constants.PENDING_UPDATE):
LOG.info(_LI("Load Balancer %s is immutable."), id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=id)
db_lb = self._get_db_lb(context.session, id)
self._test_lb_status(context.session, id)
try:
LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
id)
@ -119,27 +124,17 @@ class LoadBalancersController(base.BaseController):
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.load_balancer.update(
context.session, id, provisioning_status=constants.ERROR)
lb = self.repositories.load_balancer.get(context.session, id=id)
return self._convert_db_to_type(lb, lb_types.LoadBalancerResponse)
db_lb = self._get_db_lb(context.session, id)
return self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=202)
def delete(self, id):
"""Deletes a load balancer."""
context = pecan.request.context.get('octavia_context')
# Purely to make lines smaller length
lb_repo = self.repositories.load_balancer
db_lb = self.repositories.load_balancer.get(context.session, id=id)
if not db_lb:
LOG.info(_LI("Load Balancer %s was not found."), id)
raise exceptions.NotFound(
resource=data_models.LoadBalancer._name(), id=id)
# Check load balancer is in a mutable status
if not lb_repo.test_and_set_provisioning_status(
context.session, id, constants.PENDING_DELETE):
LOG.info(_LI("Load Balancer %s is immutable."), id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=id)
db_lb = self.repositories.load_balancer.get(context.session, id=id)
db_lb = self._get_db_lb(context.session, id)
self._test_lb_status(context.session, id,
lb_status=constants.PENDING_DELETE)
try:
LOG.info(_LI("Sending deleted Load Balancer %s to the handler"),
db_lb.id)
@ -158,14 +153,20 @@ class LoadBalancersController(base.BaseController):
Verifies that the load balancer passed in the url exists, and if so
decides which controller, if any, should control be passed.
"""
session = db_api.get_session()
if lb_id and len(remainder) and remainder[0] == 'listeners':
context = pecan.request.context.get('octavia_context')
if lb_id and len(remainder) and (remainder[0] == 'listeners' or
remainder[0] == 'pools'):
controller = remainder[0]
remainder = remainder[1:]
db_lb = self.repositories.load_balancer.get(
session, id=lb_id)
db_lb = self.repositories.load_balancer.get(context.session,
id=lb_id)
if not db_lb:
LOG.info(_LI("Load Balancer %s was not found."), lb_id)
raise exceptions.NotFound(
resource=data_models.LoadBalancer._name(), id=lb_id)
return listener.ListenersController(
load_balancer_id=db_lb.id), remainder
if controller == 'listeners':
return listener.ListenersController(
load_balancer_id=db_lb.id), remainder
elif controller == 'pools':
return pool.PoolsController(
load_balancer_id=db_lb.id), remainder

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -33,22 +34,27 @@ LOG = logging.getLogger(__name__)
class MembersController(base.BaseController):
def __init__(self, load_balancer_id, listener_id, pool_id):
def __init__(self, load_balancer_id, pool_id, listener_id=None):
super(MembersController, self).__init__()
self.load_balancer_id = load_balancer_id
self.listener_id = listener_id
self.pool_id = pool_id
self.handler = self.handler.member
@wsme_pecan.wsexpose(member_types.MemberResponse, wtypes.text)
def get(self, id):
"""Gets a single pool member's details."""
context = pecan.request.context.get('octavia_context')
db_member = self.repositories.member.get(context.session, id=id)
def _get_db_member(self, session, id):
"""Gets a specific member object from the database."""
db_member = self.repositories.member.get(session, id=id)
if not db_member:
LOG.info(_LI("Member %s not found"), id)
raise exceptions.NotFound(
resource=data_models.Member._name(), id=id)
return db_member
@wsme_pecan.wsexpose(member_types.MemberResponse, wtypes.text)
def get(self, id):
"""Gets a single pool member's details."""
context = pecan.request.context.get('octavia_context')
db_member = self._get_db_member(context.session, id)
return self._convert_db_to_type(db_member, member_types.MemberResponse)
@wsme_pecan.wsexpose([member_types.MemberResponse])
@ -60,6 +66,26 @@ class MembersController(base.BaseController):
return self._convert_db_to_type(db_members,
[member_types.MemberResponse])
def _test_lb_and_listener_statuses(self, session, member=None):
"""Verify load balancer is in a mutable state."""
# We need to verify that any listeners referencing this member's
# pool are also mutable
listener_ids = []
if member:
listener_ids = [l.id for l in member.pool.listeners]
if self.listener_id and self.listener_id not in listener_ids:
listener_ids.append(self.listener_id)
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
session, self.load_balancer_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
listener_ids=listener_ids):
LOG.info(_LI("Member cannot be created or modified because the "
"Load Balancer is in an immutable state"))
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
@wsme_pecan.wsexpose(member_types.MemberResponse,
body=member_types.MemberPOST, status_code=202)
def post(self, member):
@ -68,30 +94,21 @@ class MembersController(base.BaseController):
member_dict = member.to_dict()
member_dict['pool_id'] = self.pool_id
member_dict['operating_status'] = constants.OFFLINE
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all its listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Member cannot be created because its Load "
"Balancer is in an immutable state."))
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
self._test_lb_and_listener_statuses(context.session)
try:
db_member = self.repositories.member.create(
context.session, **member_dict)
db_member = self.repositories.member.create(context.session,
**member_dict)
except oslo_exc.DBDuplicateEntry as de:
# Setting LB and Listener back to active because this is just a
# validation failure
self.repositories.load_balancer.update(
context.session, self.load_balancer_id,
provisioning_status=constants.ACTIVE)
self.repositories.listener.update(
context.session, self.listener_id,
provisioning_status=constants.ACTIVE)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
provisioning_status=constants.ACTIVE)
if ['id'] == de.columns:
raise exceptions.IDAlreadyExists()
elif (set(['pool_id', 'ip_address', 'protocol_port']) ==
@ -108,8 +125,7 @@ class MembersController(base.BaseController):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_member = self.repositories.member.get(context.session,
id=db_member.id)
db_member = self._get_db_member(context.session, db_member.id)
return self._convert_db_to_type(db_member, member_types.MemberResponse)
@wsme_pecan.wsexpose(member_types.MemberResponse,
@ -118,64 +134,37 @@ class MembersController(base.BaseController):
def put(self, id, member):
"""Updates a pool member."""
context = pecan.request.context.get('octavia_context')
db_member = self.repositories.member.get(context.session, id=id)
if not db_member:
LOG.info(_LI("Member %s cannot be updated because its Load "
"Balancer is in an immutable state."), id)
LOG.info(_LI("Member %s not found"), id)
raise exceptions.NotFound(
resource=data_models.Member._name(), id=id)
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all its listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
db_member = self._get_db_member(context.session, id)
self._test_lb_and_listener_statuses(context.session, member=db_member)
try:
LOG.info(_LI("Sending Update of Member %s to handler"), id)
self.handler.update(db_member, member)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_member = self.repositories.member.get(context.session, id=id)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_member = self._get_db_member(context.session, id)
return self._convert_db_to_type(db_member, member_types.MemberResponse)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=202)
def delete(self, id):
"""Deletes a pool member."""
context = pecan.request.context.get('octavia_context')
db_member = self.repositories.member.get(context.session, id=id)
if not db_member:
LOG.info(_LI("Member %s not found"), id)
raise exceptions.NotFound(
resource=data_models.Member._name(), id=id)
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all its listeners as ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Member %s cannot be deleted because its Load "
"Balancer is in an immutable state."), id)
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
db_member = self.repositories.member.get(context.session, id=id)
db_member = self._get_db_member(context.session, id)
self._test_lb_and_listener_statuses(context.session, member=db_member)
try:
LOG.info(_LI("Sending Deletion of Member %s to handler"),
db_member.id)
self.handler.delete(db_member)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
if self.listener_id:
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_member = self.repositories.member.get(context.session, id=id)
return self._convert_db_to_type(db_member, member_types.MemberResponse)

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -35,41 +36,65 @@ LOG = logging.getLogger(__name__)
class PoolsController(base.BaseController):
def __init__(self, load_balancer_id, listener_id):
def __init__(self, load_balancer_id, listener_id=None):
super(PoolsController, self).__init__()
self.load_balancer_id = load_balancer_id
self.listener_id = listener_id
self.handler = self.handler.pool
def _get_db_pool(self, session, id):
"""Gets a pool object from the database."""
db_pool = self.repositories.pool.get(session, id=id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), id)
raise exceptions.NotFound(resource=data_models.Pool._name(), id=id)
return db_pool
def _get_db_listener(self, session, id):
"""Gets a listener object from the database."""
db_listener = self.repositories.listener.get(session, id=id)
if not db_listener:
LOG.info(_LI("Listener %s not found."), id)
raise exceptions.NotFound(resource=data_models.Listener._name(),
id=id)
return db_listener
@wsme_pecan.wsexpose(pool_types.PoolResponse, wtypes.text)
def get(self, id):
"""Gets a pool's details."""
context = pecan.request.context.get('octavia_context')
db_pool = self.repositories.pool.get(context.session, id=id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), id)
raise exceptions.NotFound(resource=data_models.Pool._name(), id=id)
db_pool = self._get_db_pool(context.session, id)
return self._convert_db_to_type(db_pool, pool_types.PoolResponse)
@wsme_pecan.wsexpose([pool_types.PoolResponse])
def get_all(self):
"""Lists all pools on a listener."""
context = pecan.request.context.get('octavia_context')
default_pool = self.repositories.listener.get(
context.session, id=self.listener_id).default_pool
if default_pool:
default_pool = [default_pool]
else:
default_pool = []
return self._convert_db_to_type(default_pool,
[pool_types.PoolResponse])
@wsme_pecan.wsexpose([pool_types.PoolResponse], wtypes.text)
def get_all(self, listener_id=None):
def _test_lb_status(self, session):
"""Verify load balancer is in a mutable status."""
if not self.repositories.test_and_set_lb_and_listener_prov_status(
session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Pool cannot be created because the Load "
"""Lists all pools on a listener or loadbalancer."""
context = pecan.request.context.get('octavia_context')
if listener_id is not None:
self.listener_id = listener_id
if self.listener_id:
pools = self._get_db_listener(context.session,
self.listener_id).pools
else:
pools = self.repositories.load_balancer.get(
context.session, id=self.load_balancer_id).pools
return self._convert_db_to_type(pools, [pool_types.PoolResponse])
def _test_lb_and_listener_statuses(self, session, pool=None):
"""Verify load balancer is in a mutable state."""
# We need to verify that any listeners referencing this pool are also
# mutable
listener_ids = []
if pool:
listener_ids = [l.id for l in pool.listeners]
if self.listener_id and self.listener_id not in listener_ids:
listener_ids.append(self.listener_id)
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
session, self.load_balancer_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
listener_ids=listener_ids):
LOG.info(_LI("Pool cannot be created or modified because the Load "
"Balancer is in an immutable state"))
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(session, id=self.load_balancer_id)
@ -79,24 +104,26 @@ class PoolsController(base.BaseController):
def _validate_create_pool(self, session, sp_dict, pool_dict):
"""Validate creating pool on load balancer.
Update database for load balancer and listener based on provisioning
status.
Update database for load balancer and (optional) listener based on
provisioning status.
"""
try:
db_pool = self.repositories.create_pool_on_listener(
session, self.listener_id, pool_dict, sp_dict=sp_dict)
db_pool = self.repositories.create_pool_on_load_balancer(
session, pool_dict, listener_id=self.listener_id,
sp_dict=sp_dict)
except odb_exceptions.DBDuplicateEntry as de:
if ['id'] == de.columns:
raise exceptions.IDAlreadyExists()
except odb_exceptions.DBError:
# Setting LB and Listener back to active because this is just a
# validation failure
if self.listener_id:
self.repositories.listener.update(
session, self.listener_id,
provisioning_status=constants.ACTIVE)
self.repositories.load_balancer.update(
session, self.load_balancer_id,
provisioning_status=constants.ACTIVE)
self.repositories.listener.update(
session, self.listener_id,
provisioning_status=constants.ACTIVE)
# TODO(blogan): will have to do separate validation protocol
# before creation or update since the exception messages
# do not give any information as to what constraint failed
@ -106,104 +133,84 @@ class PoolsController(base.BaseController):
db_pool.id)
self.handler.create(db_pool)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
session, self.listener_id,
operating_status=constants.ERROR)
db_pool = self.repositories.pool.get(session, id=db_pool.id)
if self.listener_id:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
session, self.listener_id,
operating_status=constants.ERROR)
db_pool = self._get_db_pool(session, db_pool.id)
return self._convert_db_to_type(db_pool, pool_types.PoolResponse)
@wsme_pecan.wsexpose(pool_types.PoolResponse, body=pool_types.PoolPOST,
status_code=202)
def post(self, pool):
"""Creates a pool on a listener.
"""Creates a pool on a load balancer or listener.
This does not allow more than one pool to be on a listener so once one
is created, another cannot be created until the first one has been
deleted.
Note that this can optionally take a listener_id with which the pool
should be associated as the listener's default_pool. If specified,
the pool creation will fail if the listener specified already has
a default_pool.
"""
# For some API requests the listener_id will be passed in the
# pool_dict:
pool_dict = pool.to_dict()
if 'listener_id' in pool_dict:
if pool_dict['listener_id'] is not None:
self.listener_id = pool_dict.pop('listener_id')
else:
del pool_dict['listener_id']
context = pecan.request.context.get('octavia_context')
if self.repositories.listener.has_pool(
if self.listener_id and self.repositories.listener.has_default_pool(
context.session, self.listener_id):
raise exceptions.DuplicatePoolEntry()
# Verify load balancer is in a mutable status. If so it can be assumed
# that the listener is also in a mutable status because a load balancer
# will only be ACTIVE when all it's listeners as ACTIVE.
self._test_lb_and_listener_statuses(context.session)
self._test_lb_status(context.session)
pool_dict = pool.to_dict()
sp_dict = pool_dict.pop('session_persistence', None)
pool_dict['operating_status'] = constants.OFFLINE
pool_dict['load_balancer_id'] = self.load_balancer_id
return self._validate_create_pool(context.session, sp_dict, pool_dict)
def _test_lb_status_put(self, session):
"""Verify load balancer is in a mutable status for put method."""
if not self.repositories.test_and_set_lb_and_listener_prov_status(
session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Pool %s cannot be updated because the Load "
"Balancer is in an immutable state"), id)
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
@wsme_pecan.wsexpose(pool_types.PoolResponse, wtypes.text,
body=pool_types.PoolPUT, status_code=202)
def put(self, id, pool):
"""Updates a pool on a listener."""
"""Updates a pool on a load balancer."""
context = pecan.request.context.get('octavia_context')
db_pool = self.repositories.pool.get(context.session, id=id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), id)
raise exceptions.NotFound(resource=data_models.Pool._name(), id=id)
# Verify load balancer is in a mutable status. If so, it can be assumed
# that the listener is also in a mutable status, because a load balancer
# will only be ACTIVE when all of its listeners are ACTIVE.
self._test_lb_status_put(context.session)
db_pool = self._get_db_pool(context.session, id)
self._test_lb_and_listener_statuses(context.session, pool=db_pool)
try:
LOG.info(_LI("Sending Update of Pool %s to handler"), id)
self.handler.update(db_pool, pool)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
for listener in db_pool.listeners:
self.repositories.listener.update(
context.session, listener.id,
operating_status=constants.ERROR)
self.repositories.pool.update(
context.session, db_pool.id,
operating_status=constants.ERROR)
db_pool = self.repositories.pool.get(context.session, id=id)
db_pool = self._get_db_pool(context.session, id)
return self._convert_db_to_type(db_pool, pool_types.PoolResponse)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=202)
def delete(self, id):
"""Deletes a pool from a listener."""
"""Deletes a pool from a load balancer."""
context = pecan.request.context.get('octavia_context')
db_pool = self.repositories.pool.get(context.session, id=id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), id)
raise exceptions.NotFound(resource=data_models.Pool._name(), id=id)
# Verify load balancer is in a mutable status. If so, it can be assumed
# that the listener is also in a mutable status, because a load balancer
# will only be ACTIVE when all of its listeners are ACTIVE.
if not self.repositories.test_and_set_lb_and_listener_prov_status(
context.session, self.load_balancer_id, self.listener_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE):
LOG.info(_LI("Pool %s cannot be deleted because the Load "
"Balancer is in an immutable state"), id)
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
db_pool = self.repositories.pool.get(context.session, id=id)
db_pool = self._get_db_pool(context.session, id)
self._test_lb_and_listener_statuses(context.session, pool=db_pool)
try:
LOG.info(_LI("Sending Deletion of Pool %s to handler"),
db_pool.id)
self.handler.delete(db_pool)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
for listener in db_pool.listeners:
self.repositories.listener.update(
context.session, listener.id,
operating_status=constants.ERROR)
self.repositories.pool.update(
context.session, db_pool.id,
operating_status=constants.ERROR)
@ -218,25 +225,22 @@ class PoolsController(base.BaseController):
which controller, if any, control should be passed to.
"""
context = pecan.request.context.get('octavia_context')
if pool_id and len(remainder) and remainder[0] == 'members':
if pool_id and len(remainder) and (remainder[0] == 'members' or
remainder[0] == 'healthmonitor'):
controller = remainder[0]
remainder = remainder[1:]
db_pool = self.repositories.pool.get(context.session, id=pool_id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), pool_id)
raise exceptions.NotFound(resource=data_models.Pool._name(),
id=pool_id)
return member.MembersController(
load_balancer_id=self.load_balancer_id,
listener_id=self.listener_id,
pool_id=db_pool.id), remainder
if pool_id and len(remainder) and remainder[0] == 'healthmonitor':
remainder = remainder[1:]
db_pool = self.repositories.pool.get(context.session, id=pool_id)
if not db_pool:
LOG.info(_LI("Pool %s not found."), pool_id)
raise exceptions.NotFound(resource=data_models.Pool._name(),
id=pool_id)
return health_monitor.HealthMonitorController(
load_balancer_id=self.load_balancer_id,
listener_id=self.listener_id,
pool_id=db_pool.id), remainder
if controller == 'members':
return member.MembersController(
load_balancer_id=self.load_balancer_id,
pool_id=db_pool.id,
listener_id=self.listener_id), remainder
elif controller == 'healthmonitor':
return health_monitor.HealthMonitorController(
load_balancer_id=self.load_balancer_id,
pool_id=db_pool.id,
listener_id=self.listener_id), remainder

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -52,21 +53,34 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
time.sleep(ASYNC_TIME)
LOG.info(_LI("Simulating controller operation for member..."))
db_mem = None
if delete:
db_mem = repo.member.get(db_api.get_session(), member.id)
repo.member.delete(db_api.get_session(), id=member.id)
elif update:
old_mem = repo.member.get(db_api.get_session(), member.id)
db_mem = repo.member.get(db_api.get_session(), member.id)
member_dict = member.to_dict()
member_dict['operating_status'] = old_mem.operating_status
member_dict['operating_status'] = db_mem.operating_status
repo.member.update(db_api.get_session(), member.id, **member_dict)
elif create:
repo.member.update(db_api.get_session(), member.id,
operating_status=constants.ONLINE)
repo.listener.update(db_api.get_session(), member.pool.listener.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
listeners = []
if db_mem:
for listener in db_mem.pool.listeners:
if listener not in listeners:
listeners.append(listener)
if member.pool.listeners:
for listener in member.pool.listeners:
if listener not in listeners:
listeners.append(listener)
if listeners:
for listener in listeners:
repo.listener.update(db_api.get_session(), listener.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
repo.load_balancer.update(db_api.get_session(),
member.pool.listener.load_balancer.id,
member.pool.load_balancer.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
@ -76,30 +90,44 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
time.sleep(ASYNC_TIME)
LOG.info(_LI("Simulating controller operation for health monitor..."))
db_hm = None
if delete:
db_hm = repo.health_monitor.get(db_api.get_session(),
pool_id=health_monitor.pool.id)
repo.health_monitor.delete(db_api.get_session(),
pool_id=health_monitor.pool.id)
elif update:
hm = repo.health_monitor.get(db_api.get_session(),
health_monitor.pool_id)
db_hm = repo.health_monitor.get(db_api.get_session(),
health_monitor.pool_id)
hm_dict = health_monitor.to_dict()
hm_dict['operating_status'] = hm.operating_status
hm_dict['operating_status'] = db_hm.operating_status
repo.health_monitor.update(db_api.get_session(), **hm_dict)
elif create:
repo.pool.update(db_api.get_session(), health_monitor.pool_id,
operating_status=constants.ONLINE)
repo.test_and_set_lb_and_listener_prov_status(
db_api.get_session(),
health_monitor.pool.listener.load_balancer.id,
health_monitor.pool.listener.id, constants.ACTIVE,
constants.ACTIVE)
repo.listener.update(db_api.get_session(),
health_monitor.pool.listener.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
listeners = []
if db_hm:
for listener in db_hm.pool.listeners:
if listener not in listeners:
listeners.append(listener)
if health_monitor.pool.listeners:
for listener in health_monitor.pool.listeners:
if listener not in listeners:
listeners.append(listener)
if listeners:
for listener in listeners:
repo.test_and_set_lb_and_listener_prov_status(
db_api.get_session(),
health_monitor.pool.load_balancer.id,
listener.id, constants.ACTIVE,
constants.ACTIVE)
repo.listener.update(db_api.get_session(),
listener.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
repo.load_balancer.update(
db_api.get_session(),
health_monitor.pool.listener.load_balancer.id,
health_monitor.pool.load_balancer.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
LOG.info(_LI("Simulated Controller Handler Thread Complete"))
@ -108,23 +136,36 @@ def simulate_controller(data_model, delete=False, update=False, create=False):
time.sleep(ASYNC_TIME)
LOG.info(_LI("Simulating controller operation for pool..."))
db_pool = None
if delete:
db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
repo.pool.delete(db_api.get_session(), id=pool.id)
elif update:
db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
pool_dict = pool.to_dict()
pool_dict['operating_status'] = db_pool.operating_status
sp_dict = pool_dict.pop('session_persistence', None)
repo.update_pool_on_listener(db_api.get_session(), pool.id,
pool_dict, sp_dict)
repo.update_pool_and_sp(db_api.get_session(), pool.id,
pool_dict, sp_dict)
elif create:
repo.pool.update(db_api.get_session(), pool.id,
operating_status=constants.ONLINE)
repo.listener.update(db_api.get_session(), pool.listener.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
listeners = []
if db_pool:
for listener in db_pool.listeners:
if listener not in listeners:
listeners.append(listener)
if pool.listeners:
for listener in pool.listeners:
if listener not in listeners:
listeners.append(listener)
if listeners:
for listener in listeners:
repo.listener.update(db_api.get_session(), listener.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
repo.load_balancer.update(db_api.get_session(),
pool.listener.load_balancer.id,
pool.load_balancer.id,
operating_status=constants.ONLINE,
provisioning_status=constants.ACTIVE)
LOG.info(_LI("Simulated Controller Handler Thread Complete"))

View File

@ -39,6 +39,7 @@ class ListenerResponse(base.BaseType):
tls_certificate_id = wtypes.wsattr(wtypes.StringType(max_length=255))
sni_containers = [wtypes.StringType(max_length=255)]
project_id = wtypes.wsattr(wtypes.UuidType())
default_pool_id = wtypes.wsattr(wtypes.UuidType())
class ListenerPOST(base.BaseType):
@ -55,6 +56,7 @@ class ListenerPOST(base.BaseType):
tls_termination = wtypes.wsattr(TLSTermination)
sni_containers = [wtypes.StringType(max_length=255)]
project_id = wtypes.wsattr(wtypes.UuidType())
default_pool_id = wtypes.wsattr(wtypes.UuidType())
class ListenerPUT(base.BaseType):
@ -68,3 +70,4 @@ class ListenerPUT(base.BaseType):
tls_certificate_id = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_termination = wtypes.wsattr(TLSTermination)
sni_containers = [wtypes.StringType(max_length=255)]
default_pool_id = wtypes.wsattr(wtypes.UuidType())
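Because default_pool_id is now a mutable listener attribute, an existing pool can be shared simply by pointing another listener at it. A minimal sketch of such an update follows; it is an editor's illustration only, with placeholder endpoint and UUIDs and the `requests` library assumed.
# Editor's sketch, not part of the patch; endpoint and UUIDs are placeholders.
import requests

OCTAVIA_API = 'http://127.0.0.1:9876'
LB_ID = 'REPLACE-WITH-LOADBALANCER-UUID'
LISTENER_ID = 'REPLACE-WITH-SECOND-LISTENER-UUID'
SHARED_POOL_ID = 'REPLACE-WITH-EXISTING-POOL-UUID'

resp = requests.put(
    '{base}/v1/loadbalancers/{lb}/listeners/{listener}'.format(
        base=OCTAVIA_API, lb=LB_ID, listener=LISTENER_ID),
    json={'default_pool_id': SHARED_POOL_ID})
print(resp.status_code)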

View File

@ -56,6 +56,7 @@ class PoolPOST(base.BaseType):
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
enabled = wtypes.wsattr(bool, default=True)
listener_id = wtypes.wsattr(wtypes.UuidType())
protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS),
mandatory=True)
lb_algorithm = wtypes.wsattr(wtypes.text, mandatory=True)

View File

@ -100,11 +100,14 @@ class Pool(BaseDataModel):
def __init__(self, id=None, project_id=None, name=None, description=None,
protocol=None, lb_algorithm=None, enabled=None,
operating_status=None, members=None, health_monitor=None,
session_persistence=None, listener=None):
session_persistence=None, load_balancer_id=None,
load_balancer=None, listeners=None):
self.id = id
self.project_id = project_id
self.name = name
self.description = description
self.load_balancer_id = load_balancer_id
self.load_balancer = load_balancer
self.protocol = protocol
self.lb_algorithm = lb_algorithm
self.enabled = enabled
@ -112,11 +115,14 @@ class Pool(BaseDataModel):
self.members = members or []
self.health_monitor = health_monitor
self.session_persistence = session_persistence
self.listener = listener
self.listeners = listeners or []
def delete(self):
self.listener.default_pool = None
self.listener.default_pool_id = None
# TODO(sbalukoff): Clean up L7Policies that reference this pool
for listener in self.listeners:
if listener.default_pool_id == self.id:
listener.default_pool = None
listener.default_pool_id = None
class Member(BaseDataModel):
@ -149,7 +155,8 @@ class Listener(BaseDataModel):
protocol_port=None, connection_limit=None,
enabled=None, provisioning_status=None, operating_status=None,
tls_certificate_id=None, stats=None, default_pool=None,
load_balancer=None, sni_containers=None, peer_port=None):
load_balancer=None, sni_containers=None, peer_port=None,
pools=None):
self.id = id
self.project_id = project_id
self.name = name
@ -168,6 +175,7 @@ class Listener(BaseDataModel):
self.load_balancer = load_balancer
self.sni_containers = sni_containers
self.peer_port = peer_port
self.pools = pools or []
class LoadBalancer(BaseDataModel):
@ -175,7 +183,7 @@ class LoadBalancer(BaseDataModel):
def __init__(self, id=None, project_id=None, name=None, description=None,
provisioning_status=None, operating_status=None, enabled=None,
topology=None, vip=None, listeners=None, amphorae=None,
vrrp_group=None):
pools=None, vrrp_group=None):
self.id = id
self.project_id = project_id
self.name = name
@ -188,6 +196,7 @@ class LoadBalancer(BaseDataModel):
self.topology = topology
self.listeners = listeners or []
self.amphorae = amphorae or []
self.pools = pools or []
class VRRPGroup(BaseDataModel):

View File

@ -94,16 +94,16 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
health_mon = self._health_mon_repo.get(db_apis.get_session(),
pool_id=pool_id)
listener = health_mon.pool.listener
listeners = health_mon.pool.listeners
health_mon.pool.health_monitor = health_mon
listener.default_pool = health_mon.pool
load_balancer = health_mon.pool.listener.load_balancer
load_balancer = health_mon.pool.load_balancer
create_hm_tf = self._taskflow_load(self._health_monitor_flows.
get_create_health_monitor_flow(),
store={constants.HEALTH_MON:
health_mon,
constants.LISTENER: listener,
constants.LISTENERS:
listeners,
constants.LOADBALANCER:
load_balancer})
with tf_logging.DynamicLoggingListener(create_hm_tf,
@ -120,14 +120,13 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
health_mon = self._health_mon_repo.get(db_apis.get_session(),
pool_id=pool_id)
listener = health_mon.pool.listener
listener.default_pool = health_mon.pool
load_balancer = listener.load_balancer
listeners = health_mon.pool.listeners
load_balancer = health_mon.pool.load_balancer
delete_hm_tf = self._taskflow_load(
self._health_monitor_flows.get_delete_health_monitor_flow(),
store={constants.HEALTH_MON: health_mon, constants.POOL_ID:
pool_id, constants.LISTENER: listener,
pool_id, constants.LISTENERS: listeners,
constants.LOADBALANCER: load_balancer})
with tf_logging.DynamicLoggingListener(delete_hm_tf,
log=LOG):
@ -144,16 +143,16 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
health_mon = self._health_mon_repo.get(db_apis.get_session(),
pool_id=pool_id)
listener = health_mon.pool.listener
listeners = health_mon.pool.listeners
health_mon.pool.health_monitor = health_mon
listener.default_pool = health_mon.pool
load_balancer = health_mon.pool.listener.load_balancer
load_balancer = health_mon.pool.load_balancer
update_hm_tf = self._taskflow_load(self._health_monitor_flows.
get_update_health_monitor_flow(),
store={constants.HEALTH_MON:
health_mon,
constants.LISTENER: listener,
constants.LISTENERS:
listeners,
constants.LOADBALANCER:
load_balancer,
constants.UPDATE_DICT:
@ -178,7 +177,9 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store={constants.LISTENER:
listener,
constants.LOADBALANCER:
load_balancer})
load_balancer,
constants.LISTENERS:
[listener]})
with tf_logging.DynamicLoggingListener(create_listener_tf,
log=LOG):
create_listener_tf.run()
@ -222,7 +223,9 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
constants.LOADBALANCER:
load_balancer,
constants.UPDATE_DICT:
listener_updates})
listener_updates,
constants.LISTENERS:
[listener]})
with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
update_listener_tf.run()
@ -324,15 +327,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
member = self._member_repo.get(db_apis.get_session(),
id=member_id)
listener = member.pool.listener
listener.default_pool = member.pool
load_balancer = listener.load_balancer
listeners = member.pool.listeners
load_balancer = member.pool.load_balancer
create_member_tf = self._taskflow_load(self._member_flows.
get_create_member_flow(),
store={constants.MEMBER: member,
constants.LISTENER:
listener,
constants.LISTENERS:
listeners,
constants.LOADBALANCER:
load_balancer})
with tf_logging.DynamicLoggingListener(create_member_tf,
@ -349,13 +351,12 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
member = self._member_repo.get(db_apis.get_session(),
id=member_id)
listener = member.pool.listener
listener.default_pool = member.pool
load_balancer = listener.load_balancer
listeners = member.pool.listeners
load_balancer = member.pool.load_balancer
delete_member_tf = self._taskflow_load(
self._member_flows.get_delete_member_flow(),
store={constants.MEMBER: member, constants.LISTENER: listener,
store={constants.MEMBER: member, constants.LISTENERS: listeners,
constants.LOADBALANCER: load_balancer})
with tf_logging.DynamicLoggingListener(delete_member_tf,
log=LOG):
@ -372,15 +373,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
member = self._member_repo.get(db_apis.get_session(),
id=member_id)
listener = member.pool.listener
listener.default_pool = member.pool
load_balancer = listener.load_balancer
listeners = member.pool.listeners
load_balancer = member.pool.load_balancer
update_member_tf = self._taskflow_load(self._member_flows.
get_update_member_flow(),
store={constants.MEMBER: member,
constants.LISTENER:
listener,
constants.LISTENERS:
listeners,
constants.LOADBALANCER:
load_balancer,
constants.UPDATE_DICT:
@ -399,15 +399,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = self._pool_repo.get(db_apis.get_session(),
id=pool_id)
listener = pool.listener
listener.default_pool = pool
load_balancer = listener.load_balancer
listeners = pool.listeners
load_balancer = pool.load_balancer
create_pool_tf = self._taskflow_load(self._pool_flows.
get_create_pool_flow(),
store={constants.POOL: pool,
constants.LISTENER:
listener,
constants.LISTENERS:
listeners,
constants.LOADBALANCER:
load_balancer})
with tf_logging.DynamicLoggingListener(create_pool_tf,
@ -424,12 +423,12 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = self._pool_repo.get(db_apis.get_session(),
id=pool_id)
load_balancer = pool.listener.load_balancer
listener = pool.listener
load_balancer = pool.load_balancer
listeners = pool.listeners
delete_pool_tf = self._taskflow_load(
self._pool_flows.get_delete_pool_flow(),
store={constants.POOL: pool, constants.LISTENER: listener,
store={constants.POOL: pool, constants.LISTENERS: listeners,
constants.LOADBALANCER: load_balancer})
with tf_logging.DynamicLoggingListener(delete_pool_tf,
log=LOG):
@ -446,15 +445,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = self._pool_repo.get(db_apis.get_session(),
id=pool_id)
listener = pool.listener
listener.default_pool = pool
load_balancer = listener.load_balancer
listeners = pool.listeners
load_balancer = pool.load_balancer
update_pool_tf = self._taskflow_load(self._pool_flows.
get_update_pool_flow(),
store={constants.POOL: pool,
constants.LISTENER:
listener,
constants.LISTENERS:
listeners,
constants.LOADBALANCER:
load_balancer,
constants.UPDATE_DICT:

View File

@ -322,7 +322,7 @@ class AmphoraFlows(object):
failover_amphora_flow.add(database_tasks.GetVipFromLoadbalancer(
requires=constants.LOADBALANCER, provides=constants.VIP))
failover_amphora_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=(constants.LISTENERS, constants.VIP)))
requires=(constants.LOADBALANCER, constants.LISTENERS)))
failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
@ -355,7 +355,7 @@ class AmphoraFlows(object):
requires=constants.AMPHORA))
failover_amphora_flow.add(amphora_driver_tasks.ListenersStart(
requires=(constants.LISTENERS, constants.VIP)))
requires=(constants.LOADBALANCER, constants.LISTENERS)))
return failover_amphora_flow

View File

@ -29,10 +29,10 @@ class HealthMonitorFlows(object):
:returns: The flow for creating a health monitor
"""
create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW)
create_hm_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
create_hm_flow.add(database_tasks.MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENER]))
create_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return create_hm_flow
@ -45,12 +45,12 @@ class HealthMonitorFlows(object):
delete_hm_flow.add(model_tasks.
DeleteModelObject(rebind={constants.OBJECT:
constants.HEALTH_MON}))
delete_hm_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB(
requires=constants.POOL_ID))
delete_hm_flow.add(database_tasks.MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENER]))
delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return delete_hm_flow
@ -64,11 +64,11 @@ class HealthMonitorFlows(object):
UpdateAttributes(
rebind={constants.OBJECT: constants.HEALTH_MON},
requires=[constants.UPDATE_DICT]))
update_hm_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
update_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
update_hm_flow.add(database_tasks.UpdateHealthMonInDB(
requires=[constants.HEALTH_MON, constants.UPDATE_DICT]))
update_hm_flow.add(database_tasks.MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENER]))
update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return update_hm_flow

View File

@ -36,14 +36,14 @@ class ListenerFlows(object):
create_listener_flow.add(database_tasks.ReloadListener(
requires='listener',
provides='listener'))
create_listener_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
create_listener_flow.add(network_tasks.UpdateVIP(
requires=constants.LOADBALANCER))
create_listener_flow.add(database_tasks.
MarkLBAndListenerActiveInDB(
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
constants.LISTENERS]))
return create_listener_flow
def get_delete_listener_flow(self):
@ -74,13 +74,13 @@ class ListenerFlows(object):
rebind={constants.OBJECT:
constants.LISTENER},
requires=[constants.UPDATE_DICT]))
update_listener_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
update_listener_flow.add(database_tasks.UpdateListenerInDB(
requires=[constants.LISTENER, constants.UPDATE_DICT]))
update_listener_flow.add(database_tasks.
MarkLBAndListenerActiveInDB(
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
constants.LISTENERS]))
return update_listener_flow

View File

@ -38,12 +38,12 @@ class MemberFlows(object):
create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.ADDED_PORTS)
))
create_member_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=(constants.LOADBALANCER, constants.LISTENER)))
create_member_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=(constants.LOADBALANCER, constants.LISTENERS)))
create_member_flow.add(database_tasks.
MarkLBAndListenerActiveInDB(
MarkLBAndListenersActiveInDB(
requires=(constants.LOADBALANCER,
constants.LISTENER)))
constants.LISTENERS)))
return create_member_flow
@ -58,12 +58,12 @@ class MemberFlows(object):
constants.MEMBER}))
delete_member_flow.add(database_tasks.DeleteMemberInDB(
requires=constants.MEMBER))
delete_member_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
delete_member_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
delete_member_flow.add(database_tasks.
MarkLBAndListenerActiveInDB(
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
constants.LISTENERS]))
return delete_member_flow
@ -77,13 +77,13 @@ class MemberFlows(object):
UpdateAttributes(
rebind={constants.OBJECT: constants.MEMBER},
requires=[constants.UPDATE_DICT]))
update_member_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
update_member_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
update_member_flow.add(database_tasks.UpdateMemberInDB(
requires=[constants.MEMBER, constants.UPDATE_DICT]))
update_member_flow.add(database_tasks.
MarkLBAndListenerActiveInDB(
MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
constants.LISTENERS]))
return update_member_flow

View File

@ -29,10 +29,10 @@ class PoolFlows(object):
:returns: The flow for creating a pool
"""
create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW)
create_pool_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
create_pool_flow.add(database_tasks.MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENER]))
create_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return create_pool_flow
@ -44,12 +44,12 @@ class PoolFlows(object):
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
delete_pool_flow.add(model_tasks.DeleteModelObject(
rebind={constants.OBJECT: constants.POOL}))
delete_pool_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
delete_pool_flow.add(database_tasks.DeletePoolInDB(
requires=constants.POOL))
delete_pool_flow.add(database_tasks.MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENER]))
delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return delete_pool_flow
@ -63,11 +63,11 @@ class PoolFlows(object):
UpdateAttributes(
rebind={constants.OBJECT: constants.POOL},
requires=[constants.UPDATE_DICT]))
update_pool_flow.add(amphora_driver_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER]))
update_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
update_pool_flow.add(database_tasks.UpdatePoolInDB(
requires=[constants.POOL, constants.UPDATE_DICT]))
update_pool_flow.add(database_tasks.MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENER]))
update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
return update_pool_flow

View File

@ -46,35 +46,18 @@ class BaseAmphoraTask(task.Task):
self.loadbalancer_repo = repo.LoadBalancerRepository()
class ListenerUpdate(BaseAmphoraTask):
"""Task to update an amphora with new configuration for the listener."""
def execute(self, loadbalancer, listener):
"""Execute listener update routines for an amphora."""
# Ideally this shouldn't be needed. This is a workaround for a
# not-very-well-understood bug that is not related to Octavia.
# https://bugs.launchpad.net/octavia/+bug/1492493
listener = self.listener_repo.get(db_apis.get_session(),
id=listener.id)
self.amphora_driver.update(listener, loadbalancer.vip)
LOG.debug("Updated amphora with new configuration for listener")
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener update."""
LOG.warn(_LW("Reverting listener update."))
self.listener_repo.update(db_apis.get_session(), id=listener.id,
provisioning_status=constants.ERROR)
return None
class ListenersUpdate(BaseAmphoraTask):
"""Task to update amphora with all listeners' configurations."""
"""Task to update amphora with all specified listeners' configurations."""
def execute(self, listeners, vip):
def execute(self, loadbalancer, listeners):
"""Execute updates per listener for an amphora."""
for listener in listeners:
self.amphora_driver.update(listener, vip)
# Ideally this shouldn't be needed. This is a workaround for a
# not-very-well-understood bug that is not related to Octavia.
# https://bugs.launchpad.net/octavia/+bug/1492493
listener = self.listener_repo.get(db_apis.get_session(),
id=listener.id)
self.amphora_driver.update(listener, loadbalancer.vip)
def revert(self, listeners, *args, **kwargs):
"""Handle failed listeners updates."""
@ -123,10 +106,10 @@ class ListenerStart(BaseAmphoraTask):
class ListenersStart(BaseAmphoraTask):
"""Task to start all listeners on the vip."""
def execute(self, listeners, vip):
def execute(self, loadbalancer, listeners):
"""Execute listener start routines for listeners on an amphora."""
for listener in listeners:
self.amphora_driver.start(listener, vip)
self.amphora_driver.start(listener, loadbalancer.vip)
LOG.debug("Started the listeners on the vip")
def revert(self, listeners, *args, **kwargs):

View File

@ -688,37 +688,40 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):
provisioning_status=constants.ERROR)
class MarkLBAndListenerActiveInDB(BaseDatabaseTask):
"""Mark the load balancer and listener active in the DB.
class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
"""Mark the load balancer and specified listeners active in the DB.
Since sqlalchemy will likely retry by itself, always revert if it fails
"""
def execute(self, loadbalancer, listener):
"""Mark the load balancer and listener as active in DB."""
def execute(self, loadbalancer, listeners):
"""Mark the load balancer and listeners as active in DB."""
LOG.debug("Mark ACTIVE in DB for load balancer id: %(lb)s "
"and listener id: %(list)s",
{'lb': loadbalancer.id, 'list': listener.id})
LOG.debug("Mark ACTIVE in DB for load balancer id: %s "
"and listener ids: %s", loadbalancer.id,
', '.join([l.id for l in listeners]))
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
provisioning_status=constants.ACTIVE)
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ACTIVE)
for listener in listeners:
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ACTIVE)
def revert(self, loadbalancer, listener, *args, **kwargs):
"""Mark the load balancer and listener as broken."""
def revert(self, loadbalancer, listeners, *args, **kwargs):
"""Mark the load balancer and listeners as broken."""
LOG.warn(_LW("Reverting mark load balancer "
"and listener active in DB "
"and listeners active in DB "
"for load balancer id %(LB)s and "
"listener id: %(list)s"),
{'LB': loadbalancer.id, 'list': listener.id})
"listener ids: %(list)s"),
{'LB': loadbalancer.id,
'list': ', '.join([l.id for l in listeners])})
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
provisioning_status=constants.ERROR)
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
for listener in listeners:
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
class MarkListenerActiveInDB(BaseDatabaseTask):
@ -943,8 +946,8 @@ class UpdatePoolInDB(BaseDatabaseTask):
LOG.debug("Update DB for pool id: %s ", pool.id)
sp_dict = update_dict.pop('session_persistence', None)
self.repos.update_pool_on_listener(db_apis.get_session(), pool.id,
update_dict, sp_dict)
self.repos.update_pool_and_sp(db_apis.get_session(), pool.id,
update_dict, sp_dict)
def revert(self, pool, *args, **kwargs):
"""Mark the pool ERROR since the update couldn't happen
@ -955,8 +958,8 @@ class UpdatePoolInDB(BaseDatabaseTask):
LOG.warn(_LW("Reverting update pool in DB "
"for pool id %s"), pool.id)
# TODO(johnsom) fix this to set the upper objects to ERROR
self.repos.update_pool_on_listener(db_apis.get_session(),
pool.id, {'enabled': 0}, None)
self.repos.update_pool_and_sp(db_apis.get_session(),
pool.id, {'enabled': 0}, None)
class GetAmphoraDetails(BaseDatabaseTask):

View File

@ -23,12 +23,13 @@ class OctaviaBase(models.ModelBase):
__data_model__ = None
def to_data_model(self, _calling_cls=None):
def to_data_model(self, _calling_classes=None):
"""Converts to a data model.
:param _calling_cls: Used only for internal recursion of this method.
Should not be called from the outside.
:param _calling_classes: Used only for internal recursion of this
method. Should not be called from the outside.
"""
calling_classes = _calling_classes or []
if not self.__data_model__:
raise NotImplementedError
dm_kwargs = {}
@ -38,18 +39,24 @@ class OctaviaBase(models.ModelBase):
if not attr_name.startswith('_')]
for attr_name in attr_names:
attr = getattr(self, attr_name)
if isinstance(attr, OctaviaBase
) and attr.__class__ != _calling_cls:
# Handle 1:N or M:N relationships
# Don't recurse down object classes too far. If we have already seen the
# same object class twice in the current chain, we are probably in a loop.
if (isinstance(attr, OctaviaBase)
and attr.__class__
and calling_classes.count(attr.__class__) < 2):
dm_kwargs[attr_name] = attr.to_data_model(
_calling_cls=self.__class__)
elif isinstance(attr, collections.InstrumentedList):
_calling_classes=calling_classes + [self.__class__])
elif isinstance(attr, (collections.InstrumentedList, list)):
dm_kwargs[attr_name] = []
for item in attr:
if isinstance(item, OctaviaBase
) and attr.__class__ != _calling_cls:
if (isinstance(item, OctaviaBase)
and item.__class__
and calling_classes.count(item.__class__) < 2):
dm_kwargs[attr_name].append(
item.to_data_model(
_calling_cls=self.__class__))
_calling_classes=(calling_classes +
[self.__class__])))
elif not isinstance(item, OctaviaBase):
dm_kwargs[attr_name].append(item)
return self.__data_model__(**dm_kwargs)
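The class-counting guard above is what keeps to_data_model() from recursing forever now that pools and listeners reference each other in both directions. The following self-contained sketch (editor's illustration in plain Python, not the SQLAlchemy models) shows the same idea: stop descending once a class has already been seen twice on the current conversion path.
# Editor's sketch, not part of the patch: the same cycle-guard pattern on a
# toy object graph with a listener <-> pool style loop.
class Node(object):
    def __init__(self, name, peers=None):
        self.name = name
        self.peers = peers or []

    def to_plain(self, _calling_classes=None):
        calling_classes = _calling_classes or []
        # Stop descending once this class already appears twice in the chain.
        if calling_classes.count(self.__class__) >= 2:
            return self.name
        return {'name': self.name,
                'peers': [p.to_plain(calling_classes + [self.__class__])
                          for p in self.peers]}

listener = Node('listener1')
pool = Node('pool1', peers=[listener])
listener.peers.append(pool)            # cycle: listener <-> pool
print(listener.to_plain())
# {'name': 'listener1', 'peers': [{'name': 'pool1', 'peers': ['listener1']}]}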

View File

@ -0,0 +1,76 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Shared pools
Revision ID: 29ff921a6eb
Revises: 43287cd10fef
Create Date: 2015-12-09 10:32:12.712932
"""
# revision identifiers, used by Alembic.
revision = '29ff921a6eb'
down_revision = '43287cd10fef'
from alembic import op
import sqlalchemy as sa
def upgrade():
conn = op.get_bind()
# Minimal examples of the tables we need to manipulate
listener = sa.sql.table(
'listener',
sa.sql.column('load_balancer_id', sa.String),
sa.sql.column('default_pool_id', sa.String))
pool = sa.sql.table(
'pool',
sa.sql.column('load_balancer_id', sa.String),
sa.sql.column('id', sa.String))
# This foreign key does not need to be unique anymore. To remove the
# uniqueness but keep the foreign key we have to do some juggling.
op.drop_constraint('fk_listener_pool_id', 'listener',
type_='foreignkey')
op.drop_constraint('uq_listener_default_pool_id', 'listener',
type_='unique')
op.create_foreign_key('fk_listener_pool_id', 'listener',
'pool', ['default_pool_id'], ['id'])
op.add_column(u'pool',
sa.Column('load_balancer_id', sa.String(36),
sa.ForeignKey('load_balancer.id'), nullable=True))
# Populate this new column appropriately
# Use isnot() so the NULL check is rendered as SQL ("IS NOT NULL") rather
# than being evaluated in Python, where a Column object is never None.
select_obj = sa.select([listener.c.load_balancer_id,
listener.c.default_pool_id]).where(
listener.c.default_pool_id.isnot(None))
result = conn.execute(select_obj)
for row in result:
stmt = pool.update().values(load_balancer_id=row[0]).where(
pool.c.id == row[1])
op.execute(stmt)
# For existing installations, the ETL above should populate the new
# pool.load_balancer_id column; it is equivalent to the following manual
# procedure:
#
# Get the output from this:
#
# SELECT default_pool_id, load_balancer_id l_id FROM listener WHERE
# default_pool_id IS NOT NULL;
#
# Then for every row returned run:
#
# UPDATE pool SET load_balancer_id = l_id WHERE id = default_pool_id;

View File

@ -206,6 +206,21 @@ class Pool(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin):
name="fk_pool_operating_status_name"),
nullable=False)
enabled = sa.Column(sa.Boolean, nullable=False)
load_balancer_id = sa.Column(
sa.String(36),
sa.ForeignKey("load_balancer.id", name="fk_pool_load_balancer_id"),
nullable=True)
load_balancer = orm.relationship("LoadBalancer", uselist=False,
backref=orm.backref("pools",
uselist=True,
cascade="delete"))
# Defining this as a custom method instead of an SQLAlchemy relationship
# for now. When L7 gets added, this list will also include any listeners
# referenced by enabled L7policies
@property
def listeners(self):
return self._default_listeners
class LoadBalancer(base_models.BASE, base_models.IdMixin,
@ -289,8 +304,6 @@ class Listener(base_models.BASE, base_models.IdMixin,
__table_args__ = (
sa.UniqueConstraint('load_balancer_id', 'protocol_port',
name='uq_listener_load_balancer_id_protocol_port'),
sa.UniqueConstraint('default_pool_id',
name='uq_listener_default_pool_id')
)
name = sa.Column(sa.String(255), nullable=True)
@ -309,7 +322,7 @@ class Listener(base_models.BASE, base_models.IdMixin,
default_pool_id = sa.Column(
sa.String(36),
sa.ForeignKey("pool.id", name="fk_listener_pool_id"),
unique=True, nullable=True)
nullable=True)
provisioning_status = sa.Column(
sa.String(16),
sa.ForeignKey("provisioning_status.name",
@ -325,12 +338,24 @@ class Listener(base_models.BASE, base_models.IdMixin,
backref=orm.backref("listeners",
uselist=True,
cascade="delete"))
# _default_listeners backref is used to generate part of pool.listeners
# list.
default_pool = orm.relationship("Pool", uselist=False,
backref=orm.backref("listener",
uselist=False),
cascade="delete")
backref=orm.backref("_default_listeners",
uselist=True))
peer_port = sa.Column(sa.Integer(), nullable=True)
# Defining this as a custom method instead of an SQLAlchemy relationship
# for now. When L7 gets added, this list will also include any pools
# referenced by enabled L7policies
@property
def pools(self):
_pools = []
_p_ids = [p.id for p in _pools]
if self.default_pool and self.default_pool.id not in _p_ids:
_pools.append(self.default_pool)
return _pools
class SNI(base_models.BASE):

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -23,6 +24,7 @@ from oslo_config import cfg
from oslo_utils import uuidutils
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import models
@ -154,13 +156,14 @@ class Repositories(object):
session.add(vip)
return self.load_balancer.get(session, id=lb.id)
def create_pool_on_listener(self, session, listener_id,
pool_dict, sp_dict=None):
def create_pool_on_load_balancer(self, session, pool_dict,
listener_id=None, sp_dict=None):
"""Inserts a pool and session persistence entity into the database.
:param session: A Sql Alchemy database session.
:param listener_id: id of the listener the pool will be referenced by
:param pool_dict: Dictionary representation of a pool
:param listener_id: Optional listener id that will
reference this pool as its default_pool_id
:param sp_dict: Dictionary representation of a session persistence
:returns: octavia.common.data_models.Pool
"""
@ -171,11 +174,12 @@ class Repositories(object):
if sp_dict:
sp_dict['pool_id'] = pool_dict['id']
self.session_persistence.create(session, **sp_dict)
self.listener.update(session, listener_id,
default_pool_id=pool_dict['id'])
if listener_id:
self.listener.update(session, listener_id,
default_pool_id=pool_dict['id'])
return self.pool.get(session, id=db_pool.id)
def update_pool_on_listener(self, session, pool_id, pool_dict, sp_dict):
def update_pool_and_sp(self, session, pool_id, pool_dict, sp_dict):
"""Updates a pool and session persistence entity in the database.
:param session: A Sql Alchemy database session.
@ -198,9 +202,10 @@ class Repositories(object):
db_pool = self.pool.get(session, id=pool_id)
return db_pool
def test_and_set_lb_and_listener_prov_status(self, session, lb_id,
listener_id, lb_prov_status,
listener_prov_status):
def test_and_set_lb_and_listeners_prov_status(self, session, lb_id,
lb_prov_status,
listener_prov_status,
listener_ids=None):
"""Tests and sets a load balancer and listener provisioning status.
Puts a lock on the load balancer table to check the status of a
@ -210,16 +215,31 @@ class Repositories(object):
:param session: A Sql Alchemy database session.
:param lb_id: id of Load Balancer
:param listener_id: id of a Listener
:param lb_prov_status: Status to set Load Balancer and Listener if
check passes.
:param listener_prov_status: Status to set Listeners if check passes
:param listener_ids: List of listener ids (can be empty)
:returns: bool
"""
success = self.load_balancer.test_and_set_provisioning_status(
session, lb_id, lb_prov_status)
self.listener.update(session, listener_id,
provisioning_status=listener_prov_status)
return success
listener_ids = listener_ids or []
# Only update LB if we have listeners.
if listener_ids:
success = self.load_balancer.test_and_set_provisioning_status(
session, lb_id, lb_prov_status)
for id in listener_ids:
self.listener.update(session, id,
provisioning_status=listener_prov_status)
return success
else:
# Just make sure LB is mutable, even though we're not really
# changing anything on it
lb = session.query(
models.LoadBalancer).with_for_update().filter_by(
id=lb_id).one()
if lb.provisioning_status not in constants.MUTABLE_STATUSES:
return False
else:
return True
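For context, here is a hedged sketch of how a caller might use the new signature to gate an operation on a shared pool. It is not part of the patch: `session` and `db_pool` (a previously fetched pool data model) are assumed to exist, and the ids are placeholders.
# Editor's sketch, not part of the patch; session and db_pool are assumed.
from octavia.common import constants
from octavia.db import repositories

repos = repositories.Repositories()
listener_ids = [listener.id for listener in db_pool.listeners]  # may be empty
ok = repos.test_and_set_lb_and_listeners_prov_status(
    session, db_pool.load_balancer_id,
    constants.PENDING_UPDATE, constants.PENDING_UPDATE,
    listener_ids=listener_ids)
if not ok:
    # The load balancer is immutable, so the pool operation must be refused.
    raise Exception('Load balancer %s is immutable' % db_pool.load_balancer_id)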
class LoadBalancerRepository(BaseRepository):
@ -298,8 +318,27 @@ class MemberRepository(BaseRepository):
class ListenerRepository(BaseRepository):
model_class = models.Listener
def has_pool(self, session, id):
"""Checks if a listener has a pool."""
def _pool_check(self, session, pool_id, listener_id=None,
lb_id=None):
"""Sanity checks for default_pool_id if specified."""
# Pool must exist on same loadbalancer as listener
pool_db = None
if listener_id:
lb_subquery = (session.query(self.model_class.load_balancer_id).
filter_by(id=listener_id).subquery())
pool_db = (session.query(models.Pool).
filter_by(id=pool_id).
filter(models.LoadBalancer.id.in_(lb_subquery)).first())
elif lb_id:
pool_db = (session.query(models.Pool).
filter_by(id=pool_id).
filter_by(load_balancer_id=lb_id).first())
if not pool_db:
raise exceptions.NotFound(
resource=data_models.Pool._name(), id=pool_id)
def has_default_pool(self, session, id):
"""Checks if a listener has a default pool."""
listener = self.get(session, id=id)
return bool(listener.default_pool)
@ -307,6 +346,10 @@ class ListenerRepository(BaseRepository):
with session.begin(subtransactions=True):
listener_db = session.query(self.model_class).filter_by(
id=id).first()
# Verify any newly specified default_pool_id exists
default_pool_id = model_kwargs.get('default_pool_id')
if default_pool_id:
self._pool_check(session, default_pool_id, listener_id=id)
sni_containers = model_kwargs.pop('sni_containers', [])
for container_ref in sni_containers:
sni = models.SNI(listener_id=id,
@ -314,6 +357,18 @@ class ListenerRepository(BaseRepository):
listener_db.sni_containers.append(sni)
listener_db.update(model_kwargs)
def create(self, session, **model_kwargs):
"""Creates a new Listener with a some validation."""
with session.begin(subtransactions=True):
# Verify any specified default_pool_id exists
default_pool_id = model_kwargs.get('default_pool_id')
if default_pool_id:
self._pool_check(session, default_pool_id,
lb_id=model_kwargs.get('load_balancer_id'))
model = self.model_class(**model_kwargs)
session.add(model)
return model.to_data_model()
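The practical effect of the new check: creating (or updating) a listener whose default_pool_id references a pool that does not exist, or that belongs to a different load balancer, is rejected with NotFound. A hedged repository-level sketch follows; `session` and the UUIDs are assumed to exist, and listener columns irrelevant to the check are omitted for brevity.
# Editor's sketch, not part of the patch; `session` and the UUIDs are assumed,
# and columns not relevant to the default_pool_id check are omitted.
from octavia.common import exceptions
from octavia.db import repositories

listener_repo = repositories.ListenerRepository()
try:
    listener_repo.create(
        session,
        load_balancer_id='REPLACE-WITH-LB-UUID',
        default_pool_id='REPLACE-WITH-POOL-ON-ANOTHER-LB-UUID')
except exceptions.NotFound:
    # The default_pool_id must reference a pool on the same load balancer.
    pass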
class ListenerStatisticsRepository(BaseRepository):
model_class = models.ListenerStatistics

View File

@ -30,11 +30,13 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
LB_PATH = LBS_PATH + '/{lb_id}'
LISTENERS_PATH = LB_PATH + '/listeners'
LISTENER_PATH = LISTENERS_PATH + '/{listener_id}'
POOLS_PATH = LISTENER_PATH + '/pools'
POOLS_PATH = LB_PATH + '/pools'
POOL_PATH = POOLS_PATH + '/{pool_id}'
MEMBERS_PATH = POOL_PATH + '/members'
DEPRECATED_POOLS_PATH = LISTENER_PATH + '/pools'
DEPRECATED_POOL_PATH = DEPRECATED_POOLS_PATH + '/{pool_id}'
MEMBERS_PATH = DEPRECATED_POOL_PATH + '/members'
MEMBER_PATH = MEMBERS_PATH + '/{member_id}'
HM_PATH = POOL_PATH + '/healthmonitor'
HM_PATH = DEPRECATED_POOL_PATH + '/healthmonitor'
def setUp(self):
super(BaseAPITest, self).setUp()
@ -44,7 +46,7 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
self.member_repo = repositories.MemberRepository()
patcher = mock.patch('octavia.api.v1.handlers.controller_simulator.'
'handler.SimulatedControllerHandler')
patcher.start()
self.handler_mock = patcher.start()
self.app = self._make_app()
def reset_pecan():
@ -112,11 +114,20 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
response = self.post(path, req_dict)
return response.json
def create_pool_sans_listener(self, lb_id, protocol, lb_algorithm,
**optionals):
req_dict = {'protocol': protocol, 'lb_algorithm': lb_algorithm}
req_dict.update(optionals)
path = self.POOLS_PATH.format(lb_id=lb_id)
response = self.post(path, req_dict)
return response.json
def create_pool(self, lb_id, listener_id, protocol, lb_algorithm,
**optionals):
req_dict = {'protocol': protocol, 'lb_algorithm': lb_algorithm}
req_dict.update(optionals)
path = self.POOLS_PATH.format(lb_id=lb_id, listener_id=listener_id)
path = self.DEPRECATED_POOLS_PATH.format(lb_id=lb_id,
listener_id=listener_id)
response = self.post(path, req_dict)
return response.json

View File

@ -88,6 +88,27 @@ class TestHealthMonitor(base.BaseAPITest):
def test_bad_create(self):
hm_json = {'name': 'test1'}
self.post(self.hm_path, hm_json, status=400)
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.ACTIVE, constants.ONLINE)
def test_create_with_bad_handler(self):
self.handler_mock().health_monitor.create.side_effect = Exception()
self.create_health_monitor(self.lb.get('id'),
self.listener.get('id'),
self.pool.get('id'),
constants.HEALTH_MONITOR_HTTP,
1, 1, 1, 1)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_duplicate_create(self):
api_hm = self.create_health_monitor(self.lb.get('id'),
@ -131,6 +152,29 @@ class TestHealthMonitor(base.BaseAPITest):
new_hm = {'type': 'bad_type', 'delay': 2}
self.set_lb_status(self.lb.get('id'))
self.put(self.hm_path, new_hm, status=400)
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.ACTIVE, constants.ONLINE)
def test_update_with_bad_handler(self):
self.create_health_monitor(self.lb.get('id'), self.listener.get('id'),
self.pool.get('id'),
constants.HEALTH_MONITOR_HTTP,
1, 1, 1, 1)
self.set_lb_status(lb_id=self.lb.get('id'))
new_hm = {'type': constants.HEALTH_MONITOR_HTTPS}
self.handler_mock().health_monitor.update.side_effect = Exception()
self.put(self.hm_path, new_hm)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_delete(self):
api_hm = self.create_health_monitor(self.lb.get('id'),
@ -160,6 +204,25 @@ class TestHealthMonitor(base.BaseAPITest):
def test_bad_delete(self):
self.delete(self.hm_path, status=404)
def test_delete_with_bad_handler(self):
api_hm = self.create_health_monitor(self.lb.get('id'),
self.listener.get('id'),
self.pool['id'],
constants.HEALTH_MONITOR_HTTP,
1, 1, 1, 1)
self.set_lb_status(lb_id=self.lb.get('id'))
response = self.get(self.hm_path)
self.assertEqual(api_hm, response.json)
self.handler_mock().health_monitor.delete.side_effect = Exception()
self.delete(self.hm_path)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_create_when_lb_pending_update(self):
self.put(self.LB_PATH.format(lb_id=self.lb.get('id')),
body={'name': 'test_name_change'})
@ -207,4 +270,4 @@ class TestHealthMonitor(base.BaseAPITest):
constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1)
self.set_lb_status(self.lb.get('id'))
self.delete(self.LB_PATH.format(lb_id=self.lb.get('id')))
self.delete(self.hm_path, status=409)
self.delete(self.hm_path, status=409)

View File

@ -1,4 +1,5 @@
# Copyright 2014 Rackspace
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -26,6 +27,9 @@ class TestListener(base.BaseAPITest):
self.set_lb_status(self.lb.get('id'))
self.listeners_path = self.LISTENERS_PATH.format(
lb_id=self.lb.get('id'))
self.pool = self.create_pool_sans_listener(
self.lb.get('id'), constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
def test_get_all(self):
listener1 = self.create_listener(self.lb.get('id'),
@ -76,7 +80,8 @@ class TestListener(base.BaseAPITest):
def test_create(self, **optionals):
sni1 = uuidutils.generate_uuid()
sni2 = uuidutils.generate_uuid()
lb_listener = {'name': 'listener1', 'description': 'desc1',
lb_listener = {'name': 'listener1', 'default_pool_id': None,
'description': 'desc1',
'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
'protocol_port': 80, 'connection_limit': 10,
'tls_certificate_id': uuidutils.generate_uuid(),
@ -106,9 +111,46 @@ class TestListener(base.BaseAPITest):
self.assert_final_listener_statuses(self.lb.get('id'),
listener_api.get('id'))
def test_create_with_default_pool_id(self):
lb_listener = {'name': 'listener1',
'default_pool_id': self.pool.get('id'),
'description': 'desc1',
'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
'protocol_port': 80}
response = self.post(self.listeners_path, lb_listener)
api_listener = response.json
self.assertEqual(api_listener.get('default_pool_id'),
self.pool.get('id'))
def test_create_with_bad_default_pool_id(self):
lb_listener = {'name': 'listener1',
'default_pool_id': uuidutils.generate_uuid(),
'description': 'desc1',
'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
'protocol_port': 80}
self.post(self.listeners_path, lb_listener, status=404)
def test_create_with_id(self):
self.test_create(id=uuidutils.generate_uuid())
def test_create_with_shared_default_pool_id(self):
lb_listener1 = {'name': 'listener1',
'default_pool_id': self.pool.get('id'),
'description': 'desc1',
'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
'protocol_port': 80}
lb_listener2 = {'name': 'listener2',
'default_pool_id': self.pool.get('id'),
'description': 'desc2',
'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
'protocol_port': 81}
listener1 = self.post(self.listeners_path, lb_listener1).json
self.set_lb_status(self.lb.get('id'), constants.ACTIVE)
listener2 = self.post(self.listeners_path, lb_listener2).json
self.assertEqual(listener1['default_pool_id'], self.pool.get('id'))
self.assertEqual(listener1['default_pool_id'],
listener2['default_pool_id'])
def test_create_with_project_id(self):
self.test_create(project_id=uuidutils.generate_uuid())
@ -123,7 +165,8 @@ class TestListener(base.BaseAPITest):
self.post(path, body, status=409, expect_errors=True)
def test_create_defaults(self):
defaults = {'name': None, 'description': None, 'enabled': True,
defaults = {'name': None, 'default_pool_id': None,
'description': None, 'enabled': True,
'connection_limit': None, 'tls_certificate_id': None,
'sni_containers': [], 'project_id': None}
lb_listener = {'protocol': constants.PROTOCOL_HTTP,
@ -150,14 +193,16 @@ class TestListener(base.BaseAPITest):
constants.PROTOCOL_TCP, 80,
name='listener1', description='desc1',
enabled=False, connection_limit=10,
tls_certificate_id=tls_uuid)
tls_certificate_id=tls_uuid,
default_pool_id=None)
self.set_lb_status(self.lb.get('id'))
new_listener = {'name': 'listener2', 'enabled': True}
new_listener = {'name': 'listener2', 'enabled': True,
'default_pool_id': self.pool.get('id')}
listener_path = self.LISTENER_PATH.format(
lb_id=self.lb.get('id'), listener_id=listener.get('id'))
response = self.put(listener_path, new_listener)
api_listener = response.json
api_listener = self.put(listener_path, new_listener).json
update_expect = {'name': 'listener2', 'enabled': True,
'default_pool_id': self.pool.get('id'),
'provisioning_status': constants.PENDING_UPDATE,
'operating_status': constants.ONLINE}
listener.update(update_expect)
@ -173,6 +218,25 @@ class TestListener(base.BaseAPITest):
listener_id='SEAN-CONNERY')
self.put(listener_path, body={}, status=404)
def test_update_with_bad_default_pool_id(self):
bad_pool_uuid = uuidutils.generate_uuid()
listener = self.create_listener(self.lb.get('id'),
constants.PROTOCOL_TCP, 80,
name='listener1', description='desc1',
enabled=False, connection_limit=10,
default_pool_id=self.pool.get('id'))
self.set_lb_status(self.lb.get('id'))
new_listener = {'name': 'listener2', 'enabled': True,
'default_pool_id': bad_pool_uuid}
listener_path = self.LISTENER_PATH.format(
lb_id=self.lb.get('id'), listener_id=listener.get('id'))
self.put(listener_path, new_listener, status=404)
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_final_listener_statuses(self.lb.get('id'),
listener.get('id'))
def test_create_listeners_same_port(self):
listener1 = self.create_listener(self.lb.get('id'),
constants.PROTOCOL_TCP, 80)
@ -204,7 +268,8 @@ class TestListener(base.BaseAPITest):
self.delete(listener_path)
response = self.get(listener_path)
api_listener = response.json
expected = {'name': None, 'description': None, 'enabled': True,
expected = {'name': None, 'default_pool_id': None,
'description': None, 'enabled': True,
'operating_status': constants.ONLINE,
'provisioning_status': constants.PENDING_DELETE,
'connection_limit': None}

View File

@ -134,6 +134,20 @@ class TestMember(base.BaseAPITest):
api_member = {'name': 'test1'}
self.post(self.members_path, api_member, status=400)
def test_create_with_bad_handler(self):
self.handler_mock().member.create.side_effect = Exception()
self.create_member(self.lb.get('id'),
self.listener.get('id'),
self.pool.get('id'),
'10.0.0.1', 80)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_duplicate_create(self):
member = {'ip_address': '10.0.0.1', 'protocol_port': 80}
self.post(self.members_path, member, status=202)
@ -178,6 +192,24 @@ class TestMember(base.BaseAPITest):
self.put(self.member_path.format(member_id=api_member.get('id')),
new_member, expect_errors=True)
def test_update_with_bad_handler(self):
api_member = self.create_member(self.lb.get('id'),
self.listener.get('id'),
self.pool.get('id'),
'10.0.0.1', 80)
self.set_lb_status(self.lb.get('id'))
new_member = {'protocol_port': 88}
self.handler_mock().member.update.side_effect = Exception()
self.put(self.member_path.format(
member_id=api_member.get('id')), new_member, status=202)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_duplicate_update(self):
self.skip('This test should pass after a validation layer.')
member = {'ip_address': '10.0.0.1', 'protocol_port': 80}
@ -221,6 +253,26 @@ class TestMember(base.BaseAPITest):
self.delete(self.member_path.format(
member_id=uuidutils.generate_uuid()), status=404)
def test_delete_with_bad_handler(self):
api_member = self.create_member(self.lb.get('id'),
self.listener.get('id'),
self.pool.get('id'),
'10.0.0.1', 80)
self.set_lb_status(self.lb.get('id'))
response = self.get(self.member_path.format(
member_id=api_member.get('id')))
api_member['operating_status'] = constants.ONLINE
self.assertEqual(api_member, response.json)
self.handler_mock().member.delete.side_effect = Exception()
self.delete(self.member_path.format(member_id=api_member.get('id')))
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
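The three ``*_with_bad_handler`` tests added here all assert the same failure contract: the API accepts the request (202 where a status code is checked), the load balancer ends up PENDING_UPDATE/ONLINE, and the affected listener's operating status drops to ERROR. A hedged, in-memory sketch of that contract, with made-up names rather than Octavia's real controller/handler plumbing::

    def apply_member_change(handler_call, set_listener_operating_status):
        """Invoke the async handler; downgrade the listener if it blows up."""
        try:
            handler_call()
        except Exception:
            set_listener_operating_status('ERROR')
        return 202  # the API has already accepted the request either way


    if __name__ == '__main__':
        statuses = {'listener': 'ONLINE'}

        def failing_handler():
            raise RuntimeError('simulated driver failure')

        code = apply_member_change(
            failing_handler, lambda s: statuses.__setitem__('listener', s))
        print(code, statuses)   # 202 {'listener': 'ERROR'}
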
def test_create_when_lb_pending_update(self):
self.create_member(self.lb.get('id'), self.listener.get('id'),
self.pool.get('id'), ip_address="10.0.0.2",


@ -27,17 +27,19 @@ class TestPool(base.BaseAPITest):
self.listener = self.create_listener(self.lb.get('id'),
constants.PROTOCOL_HTTP, 80)
self.set_lb_status(self.lb.get('id'))
self.pools_path = self.POOLS_PATH.format(
lb_id=self.lb.get('id'), listener_id=self.listener.get('id'))
self.pools_path = self.POOLS_PATH.format(lb_id=self.lb.get('id'))
self.pool_path = self.pools_path + '/{pool_id}'
self.pools_path_with_listener = (self.pools_path +
'?listener_id={listener_id}')
self.pools_path_deprecated = self.DEPRECATED_POOLS_PATH.format(
lb_id=self.lb.get('id'), listener_id=self.listener.get('id'))
self.pool_path_deprecated = self.pools_path_deprecated + '/{pool_id}'
def test_get(self):
api_pool = self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
api_pool = self.create_pool_sans_listener(
self.lb.get('id'), constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(lb_id=self.lb.get('id'))
api_pool['operating_status'] = constants.ONLINE
response = self.get(self.pool_path.format(pool_id=api_pool.get('id')))
response_body = response.json
self.assertEqual(api_pool, response_body)
@ -47,10 +49,9 @@ class TestPool(base.BaseAPITest):
status=404)
def test_get_all(self):
api_pool = self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
api_pool = self.create_pool_sans_listener(
self.lb.get('id'), constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(lb_id=self.lb.get('id'))
response = self.get(self.pools_path)
response_body = response.json
@ -58,6 +59,23 @@ class TestPool(base.BaseAPITest):
self.assertEqual(1, len(response_body))
self.assertEqual(api_pool.get('id'), response_body[0].get('id'))
def test_get_all_with_listener(self):
api_pool = self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(lb_id=self.lb.get('id'))
response = self.get(self.pools_path_with_listener.format(
listener_id=self.listener.get('id')))
response_body = response.json
self.assertIsInstance(response_body, list)
self.assertEqual(1, len(response_body))
self.assertEqual(api_pool.get('id'), response_body[0].get('id'))
def test_get_all_with_bad_listener(self):
self.get(self.pools_path_with_listener.format(
listener_id='bad_id'), status=404, expect_errors=True)
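These tests exercise the new pool collection path (pools now hang off the load balancer, with an optional ``?listener_id=`` query filter) alongside the deprecated listener-scoped path kept for backward compatibility. A small sketch of what such a filter amounts to, using an illustrative in-memory layout rather than the real repository query::

    def pools_for_listener(pools, listener_id=None):
        """Return every pool, or only those the given listener references."""
        if listener_id is None:
            return list(pools)
        return [p for p in pools if listener_id in p['listener_ids']]


    if __name__ == '__main__':
        pools = [
            {'id': 'pool-1', 'listener_ids': ['listener-1']},
            {'id': 'pool-2', 'listener_ids': []},   # created sans listener
        ]
        print(len(pools_for_listener(pools)))              # 2
        print(pools_for_listener(pools, 'listener-1'))     # only pool-1
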
def test_empty_get_all(self):
response = self.get(self.pools_path)
response_body = response.json
@ -87,6 +105,46 @@ class TestPool(base.BaseAPITest):
self.listener.get('id'),
constants.ACTIVE, constants.ONLINE)
def test_create_sans_listener(self):
api_pool = self.create_pool_sans_listener(
self.lb.get('id'), constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
api_pool.get('lb_algorithm'))
# Make sure listener / load balancer status are unchanged, as
# this should have been a pure DB operation
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
def test_create_with_listener_id_in_pool_dict(self):
api_pool = self.create_pool_sans_listener(
self.lb.get('id'), constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN,
listener_id=self.listener.get('id'))
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.set_lb_status(self.lb.get('id'))
self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
api_pool.get('lb_algorithm'))
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.ACTIVE, constants.ONLINE)
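Taken together, ``test_create_sans_listener`` and ``test_create_with_listener_id_in_pool_dict`` document the two creation modes: without a listener the pool create is a pure DB operation and nothing changes status, while passing ``listener_id`` also wires the pool in as that listener's default pool and starts a provisioning cycle (PENDING_UPDATE until the status is reset). A toy dispatcher that mirrors that split; the callables are stand-ins, not Octavia APIs::

    def create_pool(db_create, start_provisioning, pool_body, listener_id=None):
        pool = db_create(pool_body)
        if listener_id is not None:
            # Wiring a default pool touches the listener, so a provisioning
            # cycle (load balancer -> PENDING_UPDATE) is required.
            start_provisioning(listener_id, pool['id'])
        return pool


    if __name__ == '__main__':
        events = []
        db_create = lambda body: dict(body, id='pool-1')
        start_provisioning = lambda lid, pid: events.append((lid, pid))

        create_pool(db_create, start_provisioning, {'protocol': 'HTTP'})
        print(events)            # []  -- DB-only, statuses untouched
        create_pool(db_create, start_provisioning, {'protocol': 'HTTP'},
                    listener_id='listener-1')
        print(events)            # [('listener-1', 'pool-1')]
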
def test_create_with_id(self):
pid = uuidutils.generate_uuid()
api_pool = self.create_pool(self.lb.get('id'),
@ -120,18 +178,44 @@ class TestPool(base.BaseAPITest):
def test_bad_create(self):
api_pool = {'name': 'test1'}
self.post(self.pools_path, api_pool, status=400)
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.ACTIVE, constants.ONLINE)
def test_duplicate_create(self):
pool = {'protocol': constants.PROTOCOL_HTTP,
def test_create_with_listener_with_default_pool_id_set(self):
self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(self.lb.get('id'), constants.ACTIVE)
path = self.pools_path_deprecated.format(
lb_id=self.lb.get('id'), listener_id=self.listener.get('id'))
body = {'protocol': constants.PROTOCOL_HTTP,
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}
self.post(self.pools_path, pool)
self.post(self.pools_path, pool, status=409)
self.post(path, body, status=409, expect_errors=True)
def test_create_bad_protocol(self):
pool = {'protocol': 'STUPID_PROTOCOL',
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}
self.post(self.pools_path, pool, status=400)
def test_create_with_bad_handler(self):
self.handler_mock().pool.create.side_effect = Exception()
self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_update(self):
api_pool = self.create_pool(self.lb.get('id'),
self.listener.get('id'),
@ -164,9 +248,34 @@ class TestPool(base.BaseAPITest):
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(self.lb.get('id'))
new_pool = {'enabled': 'one'}
self.put(self.pool_path.format(pool_id=api_pool.get('id')),
new_pool, status=400)
self.assert_correct_lb_status(self.lb.get('id'),
constants.ACTIVE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.ACTIVE, constants.ONLINE)
def test_update_with_bad_handler(self):
api_pool = self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(lb_id=self.lb.get('id'))
new_pool = {'name': 'new_name'}
self.handler_mock().pool.update.side_effect = Exception()
self.put(self.pool_path.format(pool_id=api_pool.get('id')),
new_pool, status=202)
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_delete(self):
api_pool = self.create_pool(self.lb.get('id'),
@ -198,6 +307,26 @@ class TestPool(base.BaseAPITest):
self.delete(self.pool_path.format(
pool_id=uuidutils.generate_uuid()), status=404)
def test_delete_with_bad_handler(self):
api_pool = self.create_pool(self.lb.get('id'),
self.listener.get('id'),
constants.PROTOCOL_HTTP,
constants.LB_ALGORITHM_ROUND_ROBIN)
self.set_lb_status(lb_id=self.lb.get('id'))
api_pool['operating_status'] = constants.ONLINE
response = self.get(self.pool_path.format(
pool_id=api_pool.get('id')))
self.assertEqual(api_pool, response.json)
self.handler_mock().pool.delete.side_effect = Exception()
self.delete(self.pool_path.format(pool_id=api_pool.get('id')))
self.assert_correct_lb_status(self.lb.get('id'),
constants.PENDING_UPDATE,
constants.ONLINE)
self.assert_correct_listener_status(self.lb.get('id'),
self.listener.get('id'),
constants.PENDING_UPDATE,
constants.ERROR)
def test_create_with_session_persistence(self):
sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE,
"cookie_name": "test_cookie_name"}


@ -196,8 +196,9 @@ class PoolModelTest(base.OctaviaDBTestBase, ModelTestMixin):
self.create_listener(self.session, default_pool_id=pool.id)
new_pool = self.session.query(models.Pool).filter_by(
id=pool.id).first()
self.assertIsNotNone(new_pool.listener)
self.assertIsInstance(new_pool.listener, models.Listener)
self.assertIsNotNone(new_pool.listeners)
self.assertIsInstance(new_pool.listeners, list)
self.assertIsInstance(new_pool.listeners[0], models.Listener)
class MemberModelTest(base.OctaviaDBTestBase, ModelTestMixin):
@ -727,7 +728,7 @@ class DataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
self.assertIsInstance(pool, data_models.Pool)
self.check_pool_data_model(pool)
if check_listener:
self.check_listener(pool.listener, check_pool=False)
self.check_listener(pool.listeners[0], check_pool=False)
if check_sp:
self.check_session_persistence(pool.session_persistence,
check_pool=False)


@ -21,6 +21,7 @@ from oslo_utils import uuidutils
from octavia.common import constants
from octavia.common import data_models as models
from octavia.common import exceptions
from octavia.db import repositories as repo
from octavia.tests.functional.db import base
@ -35,6 +36,7 @@ class BaseRepositoryTest(base.OctaviaDBTestBase):
FAKE_UUID_1 = uuidutils.generate_uuid()
FAKE_UUID_2 = uuidutils.generate_uuid()
FAKE_UUID_3 = uuidutils.generate_uuid()
FAKE_UUID_4 = uuidutils.generate_uuid()
FAKE_EXP_AGE = 10
def setUp(self):
@ -69,13 +71,22 @@ class BaseRepositoryTest(base.OctaviaDBTestBase):
class AllRepositoriesTest(base.OctaviaDBTestBase):
FAKE_UUID_1 = uuidutils.generate_uuid()
FAKE_UUID_2 = uuidutils.generate_uuid()
def setUp(self):
super(AllRepositoriesTest, self).setUp()
self.repos = repo.Repositories()
self.load_balancer = self.repos.load_balancer.create(
self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2,
name="lb_name", description="lb_description",
provisioning_status=constants.ACTIVE,
operating_status=constants.ONLINE, enabled=True)
self.listener = self.repos.listener.create(
self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80,
enabled=True, provisioning_status=constants.ACTIVE,
operating_status=constants.ONLINE)
operating_status=constants.ONLINE,
load_balancer_id=self.load_balancer.id)
def test_all_repos_has_correct_repos(self):
repo_attr_names = ('load_balancer', 'vip', 'health_monitor',
@ -118,6 +129,7 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
del lb_dm_dict['vip']
del lb_dm_dict['listeners']
del lb_dm_dict['amphorae']
del lb_dm_dict['pools']
self.assertEqual(lb, lb_dm_dict)
vip_dm_dict = lb_dm.vip.to_dict()
vip_dm_dict['load_balancer_id'] = lb_dm.id
@ -130,14 +142,15 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
'enabled': True, 'operating_status': constants.ONLINE,
'project_id': uuidutils.generate_uuid()}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id,
pool)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id)
pool_dm_dict = pool_dm.to_dict()
del pool_dm_dict['members']
del pool_dm_dict['health_monitor']
del pool_dm_dict['session_persistence']
del pool_dm_dict['listener']
del pool_dm_dict['listeners']
del pool_dm_dict['load_balancer']
del pool_dm_dict['load_balancer_id']
self.assertEqual(pool, pool_dm_dict)
new_listener = self.repos.listener.get(self.session,
id=self.listener.id)
@ -151,14 +164,15 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
'project_id': uuidutils.generate_uuid()}
sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE,
'cookie_name': 'cookie_monster'}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id,
pool, sp_dict=sp)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id, sp_dict=sp)
pool_dm_dict = pool_dm.to_dict()
del pool_dm_dict['members']
del pool_dm_dict['health_monitor']
del pool_dm_dict['session_persistence']
del pool_dm_dict['listener']
del pool_dm_dict['listeners']
del pool_dm_dict['load_balancer']
del pool_dm_dict['load_balancer_id']
self.assertEqual(pool, pool_dm_dict)
sp_dm_dict = pool_dm.session_persistence.to_dict()
del sp_dm_dict['pool']
@ -171,27 +185,29 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
pool_id=pool_dm.id)
self.assertIsNotNone(new_sp)
def test_update_pool_on_listener_without_sp(self):
def test_update_pool_without_sp(self):
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1',
'description': 'desc1',
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
'enabled': True, 'operating_status': constants.ONLINE,
'project_id': uuidutils.generate_uuid()}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id, pool)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id)
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'}
new_pool_dm = self.repos.update_pool_on_listener(
new_pool_dm = self.repos.update_pool_and_sp(
self.session, pool_dm.id, update_pool, None)
pool_dm_dict = new_pool_dm.to_dict()
del pool_dm_dict['members']
del pool_dm_dict['health_monitor']
del pool_dm_dict['session_persistence']
del pool_dm_dict['listener']
del pool_dm_dict['listeners']
del pool_dm_dict['load_balancer']
del pool_dm_dict['load_balancer_id']
pool.update(update_pool)
self.assertEqual(pool, pool_dm_dict)
self.assertIsNone(new_pool_dm.session_persistence)
def test_update_pool_on_listener_with_existing_sp(self):
def test_update_pool_with_existing_sp(self):
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1',
'description': 'desc1',
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
@ -199,18 +215,19 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
'project_id': uuidutils.generate_uuid()}
sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE,
'cookie_name': 'cookie_monster'}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id,
pool, sp_dict=sp)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id, sp_dict=sp)
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'}
update_sp = {'type': constants.SESSION_PERSISTENCE_SOURCE_IP}
new_pool_dm = self.repos.update_pool_on_listener(
new_pool_dm = self.repos.update_pool_and_sp(
self.session, pool_dm.id, update_pool, update_sp)
pool_dm_dict = new_pool_dm.to_dict()
del pool_dm_dict['members']
del pool_dm_dict['health_monitor']
del pool_dm_dict['session_persistence']
del pool_dm_dict['listener']
del pool_dm_dict['listeners']
del pool_dm_dict['load_balancer']
del pool_dm_dict['load_balancer_id']
pool.update(update_pool)
self.assertEqual(pool, pool_dm_dict)
sp_dm_dict = new_pool_dm.session_persistence.to_dict()
@ -219,19 +236,18 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
sp.update(update_sp)
self.assertEqual(sp, sp_dm_dict)
def test_update_pool_on_listener_with_nonexisting_sp(self):
def test_update_pool_with_nonexisting_sp(self):
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1',
'description': 'desc1',
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
'enabled': True, 'operating_status': constants.ONLINE,
'project_id': uuidutils.generate_uuid()}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id,
pool)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id)
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'}
update_sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE,
'cookie_name': 'monster_cookie'}
new_pool_dm = self.repos.update_pool_on_listener(
new_pool_dm = self.repos.update_pool_and_sp(
self.session, pool_dm.id, update_pool, update_sp)
sp_dm_dict = new_pool_dm.session_persistence.to_dict()
del sp_dm_dict['pool']
@ -239,21 +255,20 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
update_sp.update(update_sp)
self.assertEqual(update_sp, sp_dm_dict)
def test_update_pool_on_listener_with_nonexisting_sp_delete_sp(self):
def test_update_pool_with_nonexisting_sp_delete_sp(self):
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1',
'description': 'desc1',
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
'enabled': True, 'operating_status': constants.ONLINE,
'project_id': uuidutils.generate_uuid()}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id,
pool)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id)
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'}
new_pool_dm = self.repos.update_pool_on_listener(
new_pool_dm = self.repos.update_pool_and_sp(
self.session, pool_dm.id, update_pool, None)
self.assertIsNone(new_pool_dm.session_persistence)
def test_update_pool_on_listener_with_existing_sp_delete_sp(self):
def test_update_pool_with_existing_sp_delete_sp(self):
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1',
'description': 'desc1',
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
@ -261,11 +276,10 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
'project_id': uuidutils.generate_uuid()}
sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE,
'cookie_name': 'cookie_monster'}
pool_dm = self.repos.create_pool_on_listener(self.session,
self.listener.id,
pool, sp_dict=sp)
pool_dm = self.repos.create_pool_on_load_balancer(
self.session, pool, listener_id=self.listener.id, sp_dict=sp)
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'}
new_pool_dm = self.repos.update_pool_on_listener(
new_pool_dm = self.repos.update_pool_and_sp(
self.session, pool_dm.id, update_pool, None)
self.assertIsNone(new_pool_dm.session_persistence)
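The renamed repository calls above (``create_pool_on_load_balancer`` and ``update_pool_and_sp``) keep the shape the old listener-scoped helpers had: pool attributes and the optional session-persistence dict travel through one call, and passing ``None`` for the SP dict deletes any existing session persistence. The in-memory sketch below only illustrates that contract; it is not the SQLAlchemy repository implementation::

    def update_pool_and_sp(pool, update_attrs, sp_dict):
        """Apply attribute updates, then replace or remove session persistence."""
        pool.update(update_attrs)
        if sp_dict is None:
            pool.pop('session_persistence', None)   # delete existing SP, if any
        else:
            pool['session_persistence'] = dict(sp_dict)
        return pool


    if __name__ == '__main__':
        pool = {'name': 'pool1', 'protocol': 'HTTP',
                'session_persistence': {'type': 'HTTP_COOKIE',
                                        'cookie_name': 'cookie_monster'}}
        update_pool_and_sp(pool, {'name': 'up_pool', 'protocol': 'TCP'},
                           {'type': 'SOURCE_IP'})
        print(pool['session_persistence'])       # replaced
        update_pool_and_sp(pool, {}, None)
        print('session_persistence' in pool)     # False -- SP removed
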
@ -572,6 +586,22 @@ class ListenerRepositoryTest(BaseRepositoryTest):
self.assertEqual(constants.ONLINE, new_listener.operating_status)
self.assertTrue(new_listener.enabled)
def test_create_listener_on_different_lb_than_default_pool(self):
load_balancer2 = self.lb_repo.create(
self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2,
name="lb_name2", description="lb_description2",
provisioning_status=constants.ACTIVE,
operating_status=constants.ONLINE, enabled=True)
pool = self.pool_repo.create(
self.session, id=self.FAKE_UUID_4, project_id=self.FAKE_UUID_2,
name="pool_test", description="pool_description",
protocol=constants.PROTOCOL_HTTP,
lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
operating_status=constants.ONLINE, enabled=True,
load_balancer_id=load_balancer2.id)
self.assertRaises(exceptions.NotFound, self.create_listener,
self.FAKE_UUID_1, 80, default_pool_id=pool.id)
def test_update(self):
name_change = "new_listener_name"
listener = self.create_listener(self.FAKE_UUID_1, 80)
@ -616,7 +646,8 @@ class ListenerRepositoryTest(BaseRepositoryTest):
name="pool_test", description="pool_description",
protocol=constants.PROTOCOL_HTTP,
lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
operating_status=constants.ONLINE, enabled=True)
operating_status=constants.ONLINE, enabled=True,
load_balancer_id=self.load_balancer.id)
listener = self.create_listener(self.FAKE_UUID_1, 80,
default_pool_id=pool.id)
new_listener = self.listener_repo.get(self.session, id=listener.id)
@ -624,7 +655,8 @@ class ListenerRepositoryTest(BaseRepositoryTest):
self.assertEqual(pool, new_listener.default_pool)
self.listener_repo.delete(self.session, id=new_listener.id)
self.assertIsNone(self.listener_repo.get(self.session, id=listener.id))
self.assertIsNone(self.pool_repo.get(self.session, id=pool.id))
# Pool should stick around
self.assertIsNotNone(self.pool_repo.get(self.session, id=pool.id))
def test_delete_with_all_children(self):
pool = self.pool_repo.create(
@ -632,7 +664,8 @@ class ListenerRepositoryTest(BaseRepositoryTest):
name="pool_test", description="pool_description",
protocol=constants.PROTOCOL_HTTP,
lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
operating_status=constants.ONLINE, enabled=True)
operating_status=constants.ONLINE, enabled=True,
load_balancer_id=self.load_balancer.id)
listener = self.create_listener(self.FAKE_UUID_1, 80,
default_pool_id=pool.id)
sni = self.sni_repo.create(self.session, listener_id=listener.id,
@ -651,7 +684,25 @@ class ListenerRepositoryTest(BaseRepositoryTest):
listener_id=listener.id))
self.assertIsNone(self.listener_stats_repo.get(
self.session, listener_id=sni.listener_id))
self.assertIsNone(self.pool_repo.get(self.session, id=pool.id))
# Pool should stick around
self.assertIsNotNone(self.pool_repo.get(self.session, id=pool.id))
def test_delete_default_pool_from_beneath_listener(self):
pool = self.pool_repo.create(
self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2,
name="pool_test", description="pool_description",
protocol=constants.PROTOCOL_HTTP,
lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
operating_status=constants.ONLINE, enabled=True,
load_balancer_id=self.load_balancer.id)
listener = self.create_listener(self.FAKE_UUID_1, 80,
default_pool_id=pool.id)
new_listener = self.listener_repo.get(self.session, id=listener.id)
self.assertIsNotNone(new_listener)
self.assertEqual(pool, new_listener.default_pool)
self.pool_repo.delete(self.session, id=pool.id)
new_listener = self.listener_repo.get(self.session, id=listener.id)
self.assertIsNone(new_listener.default_pool)
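The last three listener-repository tests pin down the new lifetime rules: deleting a listener leaves its (now shared) pool in place, and deleting a pool simply clears ``default_pool_id`` on any listener pointing at it. One way to express that at the schema level is a nullable foreign key with ``SET NULL`` on delete; the SQLAlchemy sketch below is an assumption for illustration and may not match the actual models or migration in this patch::

    import sqlalchemy as sa

    metadata = sa.MetaData()

    pool = sa.Table(
        'pool', metadata,
        sa.Column('id', sa.String(36), primary_key=True),
        # pools now belong to the load balancer, not to a listener
        sa.Column('load_balancer_id', sa.String(36), nullable=False),
    )

    listener = sa.Table(
        'listener', metadata,
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('load_balancer_id', sa.String(36), nullable=False),
        # nullable reference: dropping the pool nulls this out instead of
        # cascading the delete in either direction
        sa.Column('default_pool_id', sa.String(36),
                  sa.ForeignKey('pool.id', ondelete='SET NULL'),
                  nullable=True),
    )
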
class ListenerStatisticsRepositoryTest(BaseRepositoryTest):


@ -33,7 +33,7 @@ class TestHaproxyCfg(base.TestCase):
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" option forwardfor\n"
" redirect scheme https if !{ ssl_fc }\n"
" bind 10.0.0.2:443 "
"ssl crt /var/lib/octavia/certs/"
"sample_listener_id_1/FakeCN.pem "
@ -43,7 +43,6 @@ class TestHaproxyCfg(base.TestCase):
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" redirect scheme https if !{ ssl_fc }\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"
@ -71,7 +70,7 @@ class TestHaproxyCfg(base.TestCase):
fe = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" option forwardfor\n"
" redirect scheme https if !{ ssl_fc }\n"
" bind 10.0.0.2:443 "
"ssl crt /var/lib/octavia/certs/"
"sample_listener_id_1/FakeCN.pem\n"
@ -80,7 +79,6 @@ class TestHaproxyCfg(base.TestCase):
be = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" redirect scheme https if !{ ssl_fc }\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31\n"
" option httpchk GET /index.html\n"


@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types
@ -64,7 +65,8 @@ class TestListenerPOST(base.BaseTypesTest, TestListener):
def test_listener(self):
body = {"name": "test", "description": "test", "connection_limit": 10,
"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80}
"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80,
"default_pool_id": uuidutils.generate_uuid()}
listener = wsme_json.fromjson(self._type, body)
self.assertTrue(listener.enabled)
@ -85,6 +87,7 @@ class TestListenerPUT(base.BaseTypesTest, TestListener):
def test_listener(self):
body = {"name": "test", "description": "test", "connection_limit": 10,
"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80}
"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80,
"default_pool_id": uuidutils.generate_uuid()}
listener = wsme_json.fromjson(self._type, body)
self.assertEqual(wsme_types.Unset, listener.enabled)


@ -190,6 +190,7 @@ def sample_listener_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, persistence_cookie=None,
tls=False, sni=False, peer_port=None, topology=None):
proto = 'HTTP' if proto is None else proto
be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
topology = 'SINGLE' if topology is None else topology
port = '443' if proto == 'HTTPS' or proto == 'TERMINATED_HTTPS' else '80'
peer_port = 1024 if peer_port is None else peer_port
@ -207,7 +208,7 @@ def sample_listener_tuple(proto=None, monitor=True, persistence=True,
topology=topology),
peer_port=peer_port,
default_pool=sample_pool_tuple(
proto=proto, monitor=monitor, persistence=persistence,
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie),
connection_limit=98,
@ -319,7 +320,6 @@ def sample_base_expected_config(frontend=None, backend=None, peers=None):
frontend = ("frontend sample_listener_id_1\n"
" option tcplog\n"
" maxconn 98\n"
" option forwardfor\n"
" bind 10.0.0.2:80\n"
" mode http\n"
" default_backend sample_pool_id_1\n\n")
@ -331,6 +331,7 @@ def sample_base_expected_config(frontend=None, backend=None, peers=None):
" timeout check 31\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus 418\n"
" option forwardfor\n"
" server sample_member_id_1 10.0.0.99:82 weight 13 "
"check inter 30s fall 3 rise 2 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 weight 13 "


@ -34,7 +34,7 @@ class TestHealthMonitorFlows(base.TestCase):
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.LISTENER, health_mon_flow.requires)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertEqual(2, len(health_mon_flow.requires))
@ -49,7 +49,7 @@ class TestHealthMonitorFlows(base.TestCase):
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
self.assertIn(constants.POOL_ID, health_mon_flow.requires)
self.assertIn(constants.LISTENER, health_mon_flow.requires)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertEqual(4, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))
@ -61,7 +61,7 @@ class TestHealthMonitorFlows(base.TestCase):
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.LISTENER, health_mon_flow.requires)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires)


@ -35,8 +35,9 @@ class TestListenerFlows(base.TestCase):
self.assertIn(constants.LISTENER, listener_flow.requires)
self.assertIn(constants.LOADBALANCER, listener_flow.requires)
self.assertIn(constants.LISTENERS, listener_flow.requires)
self.assertEqual(2, len(listener_flow.requires))
self.assertEqual(3, len(listener_flow.requires))
self.assertEqual(1, len(listener_flow.provides))
def test_get_delete_listener_flow(self):
@ -60,6 +61,7 @@ class TestListenerFlows(base.TestCase):
self.assertIn(constants.LISTENER, listener_flow.requires)
self.assertIn(constants.LOADBALANCER, listener_flow.requires)
self.assertIn(constants.UPDATE_DICT, listener_flow.requires)
self.assertIn(constants.LISTENERS, listener_flow.requires)
self.assertEqual(3, len(listener_flow.requires))
self.assertEqual(4, len(listener_flow.requires))
self.assertEqual(0, len(listener_flow.provides))


@ -33,7 +33,7 @@ class TestMemberFlows(base.TestCase):
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.LISTENER, member_flow.requires)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertEqual(2, len(member_flow.requires))
@ -45,7 +45,7 @@ class TestMemberFlows(base.TestCase):
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.LISTENER, member_flow.requires)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertIn(constants.MEMBER, member_flow.requires)
@ -58,7 +58,7 @@ class TestMemberFlows(base.TestCase):
self.assertIsInstance(member_flow, flow.Flow)
self.assertIn(constants.LISTENER, member_flow.requires)
self.assertIn(constants.LISTENERS, member_flow.requires)
self.assertIn(constants.LOADBALANCER, member_flow.requires)
self.assertIn(constants.UPDATE_DICT, member_flow.requires)


@ -33,7 +33,7 @@ class TestPoolFlows(base.TestCase):
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn(constants.LISTENER, pool_flow.requires)
self.assertIn(constants.LISTENERS, pool_flow.requires)
self.assertIn(constants.LOADBALANCER, pool_flow.requires)
self.assertEqual(2, len(pool_flow.requires))
@ -45,7 +45,7 @@ class TestPoolFlows(base.TestCase):
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn(constants.LISTENER, pool_flow.requires)
self.assertIn(constants.LISTENERS, pool_flow.requires)
self.assertIn(constants.LOADBALANCER, pool_flow.requires)
self.assertIn(constants.POOL, pool_flow.requires)
@ -59,7 +59,7 @@ class TestPoolFlows(base.TestCase):
self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn(constants.POOL, pool_flow.requires)
self.assertIn(constants.LISTENER, pool_flow.requires)
self.assertIn(constants.LISTENERS, pool_flow.requires)
self.assertIn(constants.LOADBALANCER, pool_flow.requires)
self.assertIn(constants.UPDATE_DICT, pool_flow.requires)


@ -70,13 +70,13 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_listener_repo_update,
mock_amphora_repo_update):
listener_update_obj = amphora_driver_tasks.ListenerUpdate()
listener_update_obj.execute(_load_balancer_mock, _listener_mock)
listener_update_obj = amphora_driver_tasks.ListenersUpdate()
listener_update_obj.execute(_load_balancer_mock, [_listener_mock])
mock_driver.update.assert_called_once_with(_listener_mock, _vip_mock)
# Test the revert
amp = listener_update_obj.revert(_listener_mock)
amp = listener_update_obj.revert([_listener_mock])
repo.ListenerRepository.update.assert_called_once_with(
_session_mock,
id=LISTENER_ID,


@ -757,19 +757,19 @@ class TestDatabaseTasks(base.TestCase):
LISTENER_ID,
provisioning_status=constants.ERROR)
def test_mark_lb_and_listener_active_in_db(self,
mock_generate_uuid,
mock_LOG,
mock_get_session,
mock_loadbalancer_repo_update,
mock_listener_repo_update,
mock_amphora_repo_update,
mock_amphora_repo_delete):
def test_mark_lb_and_listeners_active_in_db(self,
mock_generate_uuid,
mock_LOG,
mock_get_session,
mock_loadbalancer_repo_update,
mock_listener_repo_update,
mock_amphora_repo_update,
mock_amphora_repo_delete):
mark_lb_and_listener_active = (database_tasks.
MarkLBAndListenerActiveInDB())
mark_lb_and_listener_active.execute(self.loadbalancer_mock,
self.listener_mock)
mark_lb_and_listeners_active = (database_tasks.
MarkLBAndListenersActiveInDB())
mark_lb_and_listeners_active.execute(self.loadbalancer_mock,
[self.listener_mock])
repo.ListenerRepository.update.assert_called_once_with(
'TEST',
@ -785,8 +785,8 @@ class TestDatabaseTasks(base.TestCase):
mock_loadbalancer_repo_update.reset_mock()
mock_listener_repo_update.reset_mock()
mark_lb_and_listener_active.revert(self.loadbalancer_mock,
self.listener_mock)
mark_lb_and_listeners_active.revert(self.loadbalancer_mock,
[self.listener_mock])
repo.ListenerRepository.update.assert_called_once_with(
'TEST',
@ -1033,7 +1033,7 @@ class TestDatabaseTasks(base.TestCase):
enabled=0)
@mock.patch(
'octavia.db.repositories.Repositories.update_pool_on_listener')
'octavia.db.repositories.Repositories.update_pool_and_sp')
def test_update_pool_in_db(self,
mock_repos_pool_update,
mock_generate_uuid,
@ -1051,7 +1051,7 @@ class TestDatabaseTasks(base.TestCase):
update_pool.execute(self.pool_mock,
update_dict)
repo.Repositories.update_pool_on_listener.assert_called_once_with(
repo.Repositories.update_pool_and_sp.assert_called_once_with(
'TEST',
POOL_ID,
update_dict, sp_dict)
@ -1062,7 +1062,7 @@ class TestDatabaseTasks(base.TestCase):
update_pool.revert(self.pool_mock)
# TODO(johnsom) fix this to set the upper objects to ERROR
repo.Repositories.update_pool_on_listener.assert_called_once_with(
repo.Repositories.update_pool_and_sp.assert_called_once_with(
'TEST',
POOL_ID,
{'enabled': 0}, None)


@ -69,16 +69,19 @@ class TestControllerWorker(base.TestCase):
def setUp(self):
_health_mon_mock.pool.listener.load_balancer.amphorae = _amphora_mock
_health_mon_mock.pool.listener = _listener_mock
_health_mon_mock.pool.listener.load_balancer.vip = _vip_mock
_health_mon_mock.pool.load_balancer.amphorae = _amphora_mock
_health_mon_mock.pool.listeners = [_listener_mock]
_health_mon_mock.pool.load_balancer = _load_balancer_mock
_health_mon_mock.pool.load_balancer.vip = _vip_mock
_listener_mock.load_balancer = _load_balancer_mock
_listener_mock.load_balancer.amphorae = _amphora_mock
_listener_mock.load_balancer.vip = _vip_mock
_member_mock.pool.listener = _listener_mock
_member_mock.pool.listener.load_balancer.vip = _vip_mock
_pool_mock.listener = _listener_mock
_pool_mock.listener.load_balancer.vip = _vip_mock
_member_mock.pool.listeners = [_listener_mock]
_member_mock.pool.load_balancer = _load_balancer_mock
_member_mock.pool.load_balancer.vip = _vip_mock
_pool_mock.listeners = [_listener_mock]
_pool_mock.load_balancer = _load_balancer_mock
_pool_mock.load_balancer.vip = _vip_mock
fetch_mock = mock.MagicMock(return_value=AMP_ID)
_flow_mock.storage.fetch = fetch_mock
@ -172,10 +175,10 @@ class TestControllerWorker(base.TestCase):
assert_called_once_with(_flow_mock,
store={constants.HEALTH_MON:
_health_mon_mock,
constants.LISTENER:
_listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock}))
_flow_mock.run.assert_called_once_with()
@ -205,10 +208,10 @@ class TestControllerWorker(base.TestCase):
store={constants.HEALTH_MON:
_health_mon_mock,
constants.POOL_ID: HM_ID,
constants.LISTENER:
_listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock}))
_flow_mock.run.assert_called_once_with()
@ -238,10 +241,10 @@ class TestControllerWorker(base.TestCase):
assert_called_once_with(_flow_mock,
store={constants.HEALTH_MON:
_health_mon_mock,
constants.LISTENER:
_listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock,
_load_balancer_mock,
constants.UPDATE_DICT:
HEALTH_UPDATE_DICT}))
@ -272,7 +275,9 @@ class TestControllerWorker(base.TestCase):
store={constants.LISTENER:
_listener_mock,
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock,
constants.LISTENERS:
[_listener_mock]}))
_flow_mock.run.assert_called_once_with()
@ -329,7 +334,9 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER:
_load_balancer_mock,
constants.UPDATE_DICT:
LISTENER_UPDATE_DICT}))
LISTENER_UPDATE_DICT,
constants.LISTENERS:
[_listener_mock]}))
_flow_mock.run.assert_called_once_with()
@ -489,9 +496,10 @@ class TestControllerWorker(base.TestCase):
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.MEMBER: _member_mock,
constants.LISTENER: _listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock}))
_flow_mock.run.assert_called_once_with()
@ -518,9 +526,10 @@ class TestControllerWorker(base.TestCase):
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(
_flow_mock, store={constants.MEMBER: _member_mock,
constants.LISTENER: _listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock}))
_flow_mock.run.assert_called_once_with()
@ -547,11 +556,12 @@ class TestControllerWorker(base.TestCase):
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.MEMBER: _member_mock,
constants.LISTENER: _listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock,
_load_balancer_mock,
constants.UPDATE_DICT:
MEMBER_UPDATE_DICT}))
MEMBER_UPDATE_DICT}))
_flow_mock.run.assert_called_once_with()
@ -578,9 +588,10 @@ class TestControllerWorker(base.TestCase):
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.POOL: _pool_mock,
constants.LISTENER: _listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock}))
_flow_mock.run.assert_called_once_with()
@ -607,9 +618,10 @@ class TestControllerWorker(base.TestCase):
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.POOL: _pool_mock,
constants.LISTENER: _listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock}))
_load_balancer_mock}))
_flow_mock.run.assert_called_once_with()
@ -636,11 +648,12 @@ class TestControllerWorker(base.TestCase):
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.POOL: _pool_mock,
constants.LISTENER: _listener_mock,
constants.LISTENERS:
[_listener_mock],
constants.LOADBALANCER:
_load_balancer_mock,
_load_balancer_mock,
constants.UPDATE_DICT:
POOL_UPDATE_DICT}))
POOL_UPDATE_DICT}))
_flow_mock.run.assert_called_once_with()
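Across the flow and task tests above, the single ``constants.LISTENER`` entry in the taskflow store becomes a ``constants.LISTENERS`` list, and tasks such as ``ListenersUpdate`` and ``MarkLBAndListenersActiveInDB`` operate on every listener of the load balancer. A compact sketch of that task shape, with stand-in objects rather than the real taskflow/driver machinery::

    from types import SimpleNamespace


    class ListenersUpdate(object):
        """Illustrative only: push config for each listener on the LB's VIP."""

        def __init__(self, driver):
            self.driver = driver

        def execute(self, loadbalancer, listeners):
            for listener in listeners:
                self.driver.update(listener, loadbalancer.vip)

        def revert(self, listeners, *args, **kwargs):
            # On failure every listener involved gets marked ERROR.
            return [(listener.id, 'ERROR') for listener in listeners]


    if __name__ == '__main__':
        calls = []
        driver = SimpleNamespace(update=lambda l, vip: calls.append((l.id, vip)))
        lb = SimpleNamespace(vip='10.0.0.2')
        listeners = [SimpleNamespace(id='listener-1'),
                     SimpleNamespace(id='listener-2')]
        ListenersUpdate(driver).execute(lb, listeners)
        print(calls)   # both listeners pushed against the same VIP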