Add option to choose LB availability_zone
Octavia now supports availability zones for load balancers. This change makes
it possible to pass that option through the LB policy.

Depends-On: https://review.opendev.org/#/c/710452/
Change-Id: Ib49b38490f83fea19307751ef1cd073307e352ae
parent d0e814a491
commit 531283783e
@@ -248,6 +248,16 @@ load-balancing policy to mitigate this effect. In real production environment,
 you are expected to set this value based on some careful dry-runs.
 
+Availability Zone
+~~~~~~~~~~~~~~~~~
+
+Load balancers have their own availability zones, similar to the compute
+service.
+
+The ``availability_zone`` option is provided since version 1.2 of the
+load-balancing policy, to allow the user to choose which availability zone to
+use when provisioning the load balancer.
+
 Validation
 ~~~~~~~~~~
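For context, a minimal sketch of how a user might create a version 1.2
load-balancing policy with the new option through openstacksdk. The cloud
name, subnet names and the zone name 'my-az' are hypothetical, and the pool
and vip properties are trimmed to an illustrative minimum rather than copied
from a real spec:

# A minimal sketch, assuming an openstacksdk connection with the Senlin
# clustering service enabled. Cloud name, subnets and 'my-az' are
# hypothetical placeholders.
import openstack

conn = openstack.connect(cloud='mycloud')

spec = {
    'type': 'senlin.policy.loadbalance',
    'version': '1.2',  # availability_zone requires policy version 1.2
    'properties': {
        'pool': {'protocol': 'HTTP', 'protocol_port': 80,
                 'subnet': 'internal-subnet'},
        'vip': {'subnet': 'public-subnet'},
        # New in 1.2: the Octavia availability zone to create the LB in.
        'availability_zone': 'my-az',
    },
}

policy = conn.clustering.create_policy(name='lb-with-az', spec=spec)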
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add availability_zone option for loadbalancers. This is supported by
+    Octavia starting in the Ussuri release.
@@ -89,7 +89,7 @@ class LoadBalancerDriver(base.DriverBase):
 
         return False
 
-    def lb_create(self, vip, pool, hm=None):
+    def lb_create(self, vip, pool, hm=None, az=None):
         """Create a LBaaS instance
 
         :param vip: A dict containing the properties for the VIP;
@@ -111,9 +111,9 @@ class LoadBalancerDriver(base.DriverBase):
             return False, msg
         subnet_id = subnet.id
         try:
-            lb = self.oc().loadbalancer_create(subnet_id,
-                                               vip.get('address', None),
-                                               vip['admin_state_up'])
+            lb = self.oc().loadbalancer_create(
+                subnet_id, vip.get('address', None), vip['admin_state_up'],
+                availability_zone=az)
         except exception.InternalError as ex:
             msg = ('Failed in creating loadbalancer: %s.'
                    % str(ex))
@@ -36,7 +36,8 @@ class OctaviaClient(base.DriverBase):
 
     @sdk.translate_exception
     def loadbalancer_create(self, vip_subnet_id, vip_address=None,
-                            admin_state_up=True, name=None, description=None):
+                            admin_state_up=True, name=None, description=None,
+                            availability_zone=None):
 
         kwargs = {
             'vip_subnet_id': vip_subnet_id,
@@ -49,6 +50,8 @@ class OctaviaClient(base.DriverBase):
             kwargs['name'] = name
         if description is not None:
             kwargs['description'] = description
+        if availability_zone is not None:
+            kwargs['availability_zone'] = availability_zone
 
         res = self.conn.load_balancer.create_load_balancer(**kwargs)
         return res
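Note that the client adds availability_zone to the request only when it is
set, so requests sent to clouds whose Octavia predates AZ support stay
unchanged. A standalone sketch of the same optional-kwarg pattern against
openstacksdk directly; the connection, subnet ID and zone name are
assumptions:

# A minimal sketch, assuming an openstacksdk connection with the Octavia
# load_balancer service available; 'SUBNET_ID' and 'my-az' are
# hypothetical placeholders.
import openstack

conn = openstack.connect(cloud='mycloud')

kwargs = {'vip_subnet_id': 'SUBNET_ID', 'admin_state_up': True}
az = 'my-az'  # may be None when the policy does not set one
if az is not None:
    # Mirror the driver: only send the attribute when it is set, so the
    # request body is identical to the pre-1.2 behaviour otherwise.
    kwargs['availability_zone'] = az

lb = conn.load_balancer.create_load_balancer(**kwargs)
print(lb.id, lb.vip_address)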
@@ -50,6 +50,9 @@ class LoadBalancingPolicy(base.Policy):
         '1.1': [
             {'status': consts.SUPPORTED, 'since': '2018.01'}
         ],
+        '1.2': [
+            {'status': consts.SUPPORTED, 'since': '2020.02'}
+        ],
     }
 
     PRIORITY = 500
@@ -73,9 +76,11 @@ class LoadBalancingPolicy(base.Policy):
     ]
 
     KEYS = (
-        POOL, VIP, HEALTH_MONITOR, LB_STATUS_TIMEOUT, LOADBALANCER
+        POOL, VIP, HEALTH_MONITOR, LB_STATUS_TIMEOUT, LOADBALANCER,
+        AVAILABILITY_ZONE
     ) = (
-        'pool', 'vip', 'health_monitor', 'lb_status_timeout', 'loadbalancer'
+        'pool', 'vip', 'health_monitor', 'lb_status_timeout', 'loadbalancer',
+        'availability_zone'
     )
 
     _POOL_KEYS = (
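The KEYS construct is a chained assignment: the right-hand tuple of strings
is bound to KEYS as a whole and simultaneously unpacked into the individual
constants. A tiny self-contained illustration of the idiom, reduced to two
keys:

# Chained tuple assignment: the right-hand tuple is evaluated once,
# bound to KEYS, and then unpacked into the constant names.
KEYS = (
    POOL, AVAILABILITY_ZONE,
) = (
    'pool', 'availability_zone',
)

print(KEYS)               # ('pool', 'availability_zone')
print(AVAILABILITY_ZONE)  # availability_zone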
@@ -283,6 +288,11 @@ class LoadBalancingPolicy(base.Policy):
             _('Name or ID of loadbalancer for the cluster on which nodes can '
               'be connected.'),
             default=None,
         ),
+        AVAILABILITY_ZONE: schema.String(
+            _('Name of the loadbalancer availability zone to use for creation '
+              'of the loadbalancer.'),
+            default=None,
+        )
     }
@@ -292,6 +302,7 @@ class LoadBalancingPolicy(base.Policy):
         self.pool_spec = self.properties.get(self.POOL, {})
         self.vip_spec = self.properties.get(self.VIP, {})
         self.hm_spec = self.properties.get(self.HEALTH_MONITOR, None)
+        self.az_spec = self.properties.get(self.AVAILABILITY_ZONE, None)
         self.lb_status_timeout = self.properties.get(self.LB_STATUS_TIMEOUT)
         self.lb = self.properties.get(self.LOADBALANCER, None)
@@ -359,7 +370,7 @@ class LoadBalancingPolicy(base.Policy):
             data['healthmonitor'] = self.hm_spec.get(self.HM_ID)
         else:
             res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec,
-                                            self.hm_spec)
+                                            self.hm_spec, self.az_spec)
             if res is False:
                 return False, data
@@ -25,7 +25,7 @@ class LoadBalancerDriver(base.DriverBase):
 
         self.member_id = "9a7aff27-fd41-4ec1-ba4c-3eb92c629313"
 
-    def lb_create(self, vip, pool, hm=None):
+    def lb_create(self, vip, pool, hm=None, az=None):
         return True, self.lb_result
 
     def lb_delete(self, **kwargs):
@@ -63,6 +63,7 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
             "url_path": "/index.html",
             "expected_codes": "200,201,202"
         }
+        self.availability_zone = 'my_fake_az'
 
     def test_init(self):
         conn_params = self.context.to_dict()
@@ -150,11 +151,13 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
 
         self.lb_driver._wait_for_lb_ready = mock.Mock()
         self.lb_driver._wait_for_lb_ready.return_value = True
-        status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm)
+        status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm,
+                                               self.availability_zone)
 
         self.assertTrue(status)
         self.oc.loadbalancer_create.assert_called_once_with(
-            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
+            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
+            availability_zone=self.availability_zone)
         self.assertEqual('LB_ID', res['loadbalancer'])
         self.assertEqual('192.168.1.100', res['vip_address'])
         self.oc.listener_create.assert_called_once_with(
@@ -195,7 +198,8 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
         msg = _('Failed in creating loadbalancer (%s).') % 'LB_ID'
         self.assertEqual(msg, res)
         self.oc.loadbalancer_create.assert_called_once_with(
-            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
+            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
+            availability_zone=None)
         self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID')
         self.lb_driver.lb_delete.assert_called_once_with(
             loadbalancer='LB_ID')
@@ -240,7 +244,8 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
         msg = _('Failed in creating listener (%s).') % 'LISTENER_ID'
         self.assertEqual(msg, res)
         self.oc.loadbalancer_create.assert_called_once_with(
-            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
+            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
+            availability_zone=None)
         self.oc.listener_create.assert_called_once_with(
             'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
             self.vip['connection_limit'], self.vip['admin_state_up'])
@@ -284,7 +289,8 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
         msg = _('Failed in creating pool (%s).') % 'POOL_ID'
         self.assertEqual(msg, res)
         self.oc.loadbalancer_create.assert_called_once_with(
-            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
+            'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
+            availability_zone=None)
         self.oc.listener_create.assert_called_once_with(
             'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
             self.vip['connection_limit'], self.vip['admin_state_up'])
@@ -64,7 +64,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
                     'url_path': '/index.html',
                     'expected_codes': '200,201,202'
                 },
-                'lb_status_timeout': 300
+                'lb_status_timeout': 300,
             }
         }
         self.sd = mock.Mock()
@@ -254,7 +254,8 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
         self.assertEqual('policy_data', data)
         self.lb_driver.lb_create.assert_called_once_with(policy.vip_spec,
                                                          policy.pool_spec,
-                                                         policy.hm_spec)
+                                                         policy.hm_spec,
+                                                         policy.az_spec)
         member_add_calls = [
             mock.call(node1, 'LB_ID', 'POOL_ID', 80, 'internal-subnet'),
             mock.call(node2, 'LB_ID', 'POOL_ID', 80, 'internal-subnet')
@@ -62,6 +62,7 @@ senlin.policies =
     senlin.policy.health-1.1 = senlin.policies.health_policy:HealthPolicy
     senlin.policy.loadbalance-1.0 = senlin.policies.lb_policy:LoadBalancingPolicy
     senlin.policy.loadbalance-1.1 = senlin.policies.lb_policy:LoadBalancingPolicy
+    senlin.policy.loadbalance-1.2 = senlin.policies.lb_policy:LoadBalancingPolicy
     senlin.policy.region_placement-1.0 = senlin.policies.region_placement:RegionPlacementPolicy
     senlin.policy.zone_placement-1.0 = senlin.policies.zone_placement:ZonePlacementPolicy
     senlin.policy.affinity-1.0 = senlin.policies.affinity_policy:AffinityPolicy
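These setuptools entry points are how Senlin discovers policy classes at
runtime; the new line registers loadbalance version 1.2 against the same
LoadBalancingPolicy class. A sketch of inspecting the group with only the
standard library (output depends on what is installed in the environment):

# A minimal sketch using importlib.metadata (Python 3.10+ for the
# group= keyword). Lists whatever is registered under the
# 'senlin.policies' entry-point group.
from importlib.metadata import entry_points

for ep in entry_points(group='senlin.policies'):
    print(ep.name, '->', ep.value)
# With this change installed, the listing would include:
#   senlin.policy.loadbalance-1.2 -> senlin.policies.lb_policy:LoadBalancingPolicy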