Allow LB creation with VIP_NETWORK *or* VIP_SUBNET

Octavia allows LB creation with only a vip_network_id specified. This is
necessary for things like routed-network environments where subnets are
often not exposed to users, or else not well understood by them, and
Octavia should retain responsibility for choosing the subnet.

Change-Id: Ic252cf7d5cc7277fa86bf12df8776dac27151f83
This commit is contained in:
Adam Harwell 2020-03-16 10:23:15 -07:00
parent 772ab40a9d
commit d3bd9ef3f3
9 changed files with 289 additions and 49 deletions

View File

@ -51,11 +51,9 @@ As you can see, there are many properties related to the policy. The good news
is that for most of them, there are reasonable default values. All properties
are optional except for the following few:
- ``pool.subnet``: This property provides the name or ID of the subnet for the
port on which nodes can be connected.
- ``vip.subnet``: This property provides the name or ID of the subnet on which
the virtual IP (VIP) is allocated.
- ``vip.subnet`` or ``vip.network``: These properties provide the name or ID
of the subnet or network on which the virtual IP (VIP) is allocated. At least
one (or both) of them must be specified.
The following subsections describe each and every group of properties and the
general rules on using them.
@ -128,13 +126,15 @@ Virtual IP
The Virtual IP (or "VIP" for short) refers to the IP address visible from the
client side. It is the single IP address used by all clients to access the
application or service running on the pool nodes. You have to specify a value
for the ``vip.subnet`` property even though you don't have a preference about
the actual VIP allocated. However, if you do have a preferred VIP address to
use, you will need to provide both ``vip.subnet`` and ``vip.address`` values.
for either the ``vip.subnet`` or ``vip.network`` property even though you don't
have a preference about the actual VIP allocated. However, if you do have a
preferred VIP address to use, you will need to provide both a
``vip.subnet``/``vip.network`` and a ``vip.address`` value.
The LBaaS service will check if both values are valid.
Note that if you choose to omit the ``vip.address`` property, the LBaaS
service will allocate an address for you from the provided subnet. You will
service will allocate an address for you from either the provided subnet,
or a subnet automatically chosen from the provided network. You will
have to check the cluster's ``data`` property after the load-balancing policy
has been successfully attached to your cluster. For example:
@ -262,8 +262,8 @@ Validation
~~~~~~~~~~
When creating a new load-balancing policy object, Senlin checks if the subnet
provided are actually known to the Neutron network service. Or else, the
policy creation will fail.
and/or network provided are actually known to the Neutron network service. If
they are not, the policy creation will fail.
Updates to the Cluster and Nodes

View File

@ -0,0 +1,5 @@
---
fixes:
- |
Loadbalancers incorrectly required a VIP subnet, when they should actually
accept either a VIP subnet or VIP network. Now either/both is acceptable.

View File

@ -103,17 +103,23 @@ class LoadBalancerDriver(base.DriverBase):
result = {}
# Create loadblancer
subnet_id = None
network_id = None
try:
subnet = self.nc().subnet_get(vip['subnet'])
if vip.get('subnet'):
subnet = self.nc().subnet_get(vip['subnet'])
subnet_id = subnet.id
if vip.get('network'):
network = self.nc().network_get(vip['network'])
network_id = network.id
except exception.InternalError as ex:
msg = 'Failed in getting subnet: %s.' % ex
LOG.exception(msg)
return False, msg
subnet_id = subnet.id
try:
lb = self.oc().loadbalancer_create(
subnet_id, vip.get('address', None), vip['admin_state_up'],
availability_zone=az)
subnet_id, network_id, vip.get('address', None),
vip['admin_state_up'], availability_zone=az)
except exception.InternalError as ex:
msg = ('Failed in creating loadbalancer: %s.'
% str(ex))

View File

@ -35,15 +35,18 @@ class OctaviaClient(base.DriverBase):
return lb
@sdk.translate_exception
def loadbalancer_create(self, vip_subnet_id, vip_address=None,
admin_state_up=True, name=None, description=None,
availability_zone=None):
def loadbalancer_create(self, vip_subnet_id=None, vip_network_id=None,
vip_address=None, admin_state_up=True, name=None,
description=None, availability_zone=None):
kwargs = {
'vip_subnet_id': vip_subnet_id,
'admin_state_up': admin_state_up,
}
if vip_subnet_id is not None:
kwargs['vip_subnet_id'] = vip_subnet_id
if vip_network_id is not None:
kwargs['vip_network_id'] = vip_network_id
if vip_address is not None:
kwargs['vip_address'] = vip_address
if name is not None:

View File

@ -42,7 +42,7 @@ class LoadBalancingPolicy(base.Policy):
the cluster (which could be created by the policy) when these actions are
performed.
"""
VERSION = '1.1'
VERSION = '1.3'
VERSIONS = {
'1.0': [
{'status': consts.SUPPORTED, 'since': '2016.04'}
@ -53,6 +53,9 @@ class LoadBalancingPolicy(base.Policy):
'1.2': [
{'status': consts.SUPPORTED, 'since': '2020.02'}
],
'1.3': [
{'status': consts.SUPPORTED, 'since': '2020.03'}
],
}
PRIORITY = 500
@ -116,10 +119,10 @@ class LoadBalancingPolicy(base.Policy):
)
_VIP_KEYS = (
VIP_SUBNET, VIP_ADDRESS, VIP_CONNECTION_LIMIT, VIP_PROTOCOL,
VIP_PROTOCOL_PORT, VIP_ADMIN_STATE_UP,
VIP_SUBNET, VIP_NETWORK, VIP_ADDRESS, VIP_CONNECTION_LIMIT,
VIP_PROTOCOL, VIP_PROTOCOL_PORT, VIP_ADMIN_STATE_UP,
) = (
'subnet', 'address', 'connection_limit', 'protocol',
'subnet', 'network', 'address', 'connection_limit', 'protocol',
'protocol_port', 'admin_state_up',
)
@ -201,8 +204,13 @@ class LoadBalancingPolicy(base.Policy):
schema={
VIP_SUBNET: schema.String(
_('Name or ID of Subnet on which the VIP address will be '
'allocated.'),
required=True,
'allocated. One of Subnet or Network is required.'),
required=False,
),
VIP_NETWORK: schema.String(
_('Name or ID of Network on which the VIP address will be '
'allocated. One of Subnet or Network is required.'),
required=False,
),
VIP_ADDRESS: schema.String(
_('IP address of the VIP.'),
@ -324,13 +332,33 @@ class LoadBalancingPolicy(base.Policy):
) % {'key': self.POOL_SUBNET, 'value': name_or_id}
raise exc.InvalidSpec(message=msg)
# validate VIP subnet
name_or_id = self.vip_spec.get(self.VIP_SUBNET)
# validate VIP subnet or network
subnet_name_or_id = self.vip_spec.get(self.VIP_SUBNET)
network_name_or_id = self.vip_spec.get(self.VIP_NETWORK)
if not subnet_name_or_id and not network_name_or_id:
msg = _("At least one of VIP Subnet or Network must be defined.")
raise exc.InvalidSpec(message=msg)
try:
nc.subnet_get(name_or_id)
# Check subnet if it is set
obj_type = self.VIP_SUBNET
name_or_id = subnet_name_or_id
if name_or_id:
nc.subnet_get(name_or_id)
# Check network if it is set
obj_type = self.VIP_NETWORK
name_or_id = network_name_or_id
if name_or_id:
nc.network_get(name_or_id)
# TODO(rm_work): We *could* do more validation here to catch issues
# at validation time, like verifying the subnet's network_id is the
# same as the id of the network, if both are set -- but for now we
# will just leave that up to the LB API, which means if there is a
# failure, it won't be caught until attach time.
except exc.InternalError:
msg = _("The specified %(key)s '%(value)s' could not be found."
) % {'key': self.VIP_SUBNET, 'value': name_or_id}
) % {'key': obj_type, 'value': name_or_id}
raise exc.InvalidSpec(message=msg)
# validate loadbalancer

View File

@ -28,6 +28,7 @@ class OctaviaClient(base.DriverBase):
def __init__(self, ctx):
self.fake_lb = {
"admin_state_up": True,
"availability_zone": "test_az",
"description": "Best App load balancer 1",
"id": FAKE_LB_ID,
"listeners": [{"id": FAKE_LISTENER_ID}],
@ -39,7 +40,8 @@ class OctaviaClient(base.DriverBase):
"provisioning_status": "ACTIVE",
"vip_address": "203.0.113.10",
"vip_port_id": "1e20d91d-8df9-4c15-9778-28bc89226c19",
"vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea"
"vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea",
"vip_network_id": "e2de51e5-f10a-40f3-8f5c-7bab784b1380",
}
self.fake_listener = {
@ -128,9 +130,11 @@ class OctaviaClient(base.DriverBase):
"url_path": "/"
}
def loadbalancer_create(self, vip_subnet_id, vip_address=None,
admin_state_up=True, name=None, description=None):
def loadbalancer_create(self, vip_subnet_id=None, vip_network_id=None,
vip_address=None, admin_state_up=True, name=None,
description=None):
self.fake_lb["vip_subnet_id"] = vip_subnet_id
self.fake_lb["vip_network_id"] = vip_network_id
self.fake_lb["admin_state_up"] = admin_state_up
if vip_address:
self.fake_lb["vip_address"] = vip_address

View File

@ -128,7 +128,7 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
self.assertFalse(res)
mock_sleep.assert_called_once_with(10)
def test_lb_create_succeeded(self):
def test_lb_create_succeeded_subnet(self):
lb_obj = mock.Mock()
listener_obj = mock.Mock()
pool_obj = mock.Mock()
@ -156,7 +156,7 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
self.assertTrue(status)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'],
availability_zone=self.availability_zone)
self.assertEqual('LB_ID', res['loadbalancer'])
self.assertEqual('192.168.1.100', res['vip_address'])
@ -179,6 +179,64 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
self.lb_driver._wait_for_lb_ready.assert_has_calls(
calls, any_order=False)
def test_lb_create_succeeded_network(self):
# LB creation should succeed when the VIP spec carries only a
# 'network' key (no 'subnet'): the driver resolves the network name
# through the network client and passes (subnet_id=None, network_id)
# positionally to octavia's loadbalancer_create.
vip = {
'network': 'network-01',
'address': '192.168.1.100',
'admin_state_up': True,
'protocol': 'HTTP',
'protocol_port': 80,
'connection_limit': 50
}
# Fake Octavia resources returned by the mocked octavia client.
lb_obj = mock.Mock()
listener_obj = mock.Mock()
pool_obj = mock.Mock()
hm_obj = mock.Mock()
lb_obj.id = 'LB_ID'
lb_obj.vip_address = '192.168.1.100'
listener_obj.id = 'LISTENER_ID'
pool_obj.id = 'POOL_ID'
# Fake Neutron network to be resolved from the 'network-01' name.
network_obj = mock.Mock()
network_obj.name = 'network'
network_obj.id = 'NETWORK_ID'
hm_obj.id = 'HEALTHMONITOR_ID'
self.oc.loadbalancer_create.return_value = lb_obj
self.oc.listener_create.return_value = listener_obj
self.oc.pool_create.return_value = pool_obj
self.oc.healthmonitor_create.return_value = hm_obj
self.nc.network_get.return_value = network_obj
# Pretend the LB reaches ACTIVE immediately after every create step.
self.lb_driver._wait_for_lb_ready = mock.Mock()
self.lb_driver._wait_for_lb_ready.return_value = True
status, res = self.lb_driver.lb_create(vip, self.pool, self.hm,
self.availability_zone)
self.assertTrue(status)
# Key assertion for this change: subnet id is None and the resolved
# network id is supplied instead.
self.oc.loadbalancer_create.assert_called_once_with(
None, 'NETWORK_ID', vip['address'], vip['admin_state_up'],
availability_zone=self.availability_zone)
self.assertEqual('LB_ID', res['loadbalancer'])
self.assertEqual('192.168.1.100', res['vip_address'])
self.oc.listener_create.assert_called_once_with(
'LB_ID', vip['protocol'], vip['protocol_port'],
vip['connection_limit'], vip['admin_state_up'])
self.assertEqual('LISTENER_ID', res['listener'])
self.oc.pool_create.assert_called_once_with(
self.pool['lb_method'], 'LISTENER_ID', self.pool['protocol'],
self.pool['admin_state_up'])
self.assertEqual('POOL_ID', res['pool'])
self.oc.healthmonitor_create.assert_called_once_with(
self.hm['type'], self.hm['delay'], self.hm['timeout'],
self.hm['max_retries'], 'POOL_ID', self.hm['admin_state_up'],
self.hm['http_method'], self.hm['url_path'],
self.hm['expected_codes'])
self.assertEqual('HEALTHMONITOR_ID', res['healthmonitor'])
self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID')
# The driver polls readiness once per created resource (LB, listener,
# pool, health monitor) -> four identical calls, in order.
calls = [mock.call('LB_ID') for i in range(1, 5)]
self.lb_driver._wait_for_lb_ready.assert_has_calls(
calls, any_order=False)
def test_lb_create_loadbalancer_creation_failed(self):
lb_obj = mock.Mock()
lb_obj.id = 'LB_ID'
@ -198,7 +256,7 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
msg = _('Failed in creating loadbalancer (%s).') % 'LB_ID'
self.assertEqual(msg, res)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'],
availability_zone=None)
self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID')
self.lb_driver.lb_delete.assert_called_once_with(
@ -244,7 +302,7 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
msg = _('Failed in creating listener (%s).') % 'LISTENER_ID'
self.assertEqual(msg, res)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'],
availability_zone=None)
self.oc.listener_create.assert_called_once_with(
'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
@ -289,7 +347,7 @@ class TestOctaviaLBaaSDriver(base.SenlinTestCase):
msg = _('Failed in creating pool (%s).') % 'POOL_ID'
self.assertEqual(msg, res)
self.oc.loadbalancer_create.assert_called_once_with(
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'],
'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'],
availability_zone=None)
self.oc.listener_create.assert_called_once_with(
'LB_ID', self.vip['protocol'], self.vip['protocol_port'],

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_context import context as oslo_context
@ -32,7 +33,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
self.context = utils.dummy_context()
self.spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.0',
'version': '1.3',
'properties': {
'pool': {
'id': '',
@ -49,6 +50,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
'vip': {
'address': '192.168.1.100',
'subnet': 'external-subnet',
'network': 'external-network',
'connection_limit': 500,
'protocol': 'HTTP',
'protocol_port': 80,
@ -65,6 +67,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
'expected_codes': '200,201,202'
},
'lb_status_timeout': 300,
'availability_zone': 'test_az'
}
}
self.sd = mock.Mock()
@ -79,15 +82,15 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual('senlin.policy.loadbalance-1.0', policy.type)
self.assertEqual('senlin.policy.loadbalance-1.3', policy.type)
self.assertEqual(self.spec['properties']['pool'], policy.pool_spec)
self.assertEqual(self.spec['properties']['vip'], policy.vip_spec)
self.assertIsNone(policy.lb)
def test_init_with_default_value(self):
def test_init_with_default_value_subnet_only(self):
spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.0',
'version': '1.3',
'properties': {
'pool': {'subnet': 'internal-subnet'},
'vip': {'subnet': 'external-subnet'}
@ -95,7 +98,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
}
default_spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.0',
'version': '1.3',
'properties': {
'pool': {
'id': None,
@ -109,6 +112,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
'vip': {
'address': None,
'subnet': 'external-subnet',
'network': None,
'connection_limit': -1,
'protocol': 'HTTP',
'protocol_port': 80,
@ -122,7 +126,100 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual('senlin.policy.loadbalance-1.0', policy.type)
self.assertEqual('senlin.policy.loadbalance-1.3', policy.type)
self.assertEqual(default_spec['properties']['pool'], policy.pool_spec)
self.assertEqual(default_spec['properties']['vip'], policy.vip_spec)
self.assertEqual(default_spec['properties']['lb_status_timeout'],
policy.lb_status_timeout)
self.assertIsNone(policy.lb)
def test_init_with_default_value_network_only(self):
# A spec that sets only vip.network (no vip.subnet) must be accepted
# at policy-construction time, with every omitted property filled in
# from the schema defaults (vip.subnet defaulting to None).
spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.3',
'properties': {
'pool': {'subnet': 'internal-subnet'},
'vip': {'network': 'external-network'}
}
}
# Fully-expanded spec expected after defaults are applied.
default_spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.3',
'properties': {
'pool': {
'id': None,
'protocol': 'HTTP',
'protocol_port': 80,
'subnet': 'internal-subnet',
'lb_method': 'ROUND_ROBIN',
'admin_state_up': True,
'session_persistence': {},
},
'vip': {
'address': None,
'subnet': None,
'network': 'external-network',
'connection_limit': -1,
'protocol': 'HTTP',
'protocol_port': 80,
'admin_state_up': True,
},
'lb_status_timeout': 300
}
}
policy = lb_policy.LoadBalancingPolicy('test-policy', spec)
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual('senlin.policy.loadbalance-1.3', policy.type)
self.assertEqual(default_spec['properties']['pool'], policy.pool_spec)
self.assertEqual(default_spec['properties']['vip'], policy.vip_spec)
self.assertEqual(default_spec['properties']['lb_status_timeout'],
policy.lb_status_timeout)
self.assertIsNone(policy.lb)
def test_init_with_default_value_subnet_and_network(self):
spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.3',
'properties': {
'pool': {'subnet': 'internal-subnet'},
'vip': {'subnet': 'external-subnet',
'network': 'external-network'}
}
}
default_spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.3',
'properties': {
'pool': {
'id': None,
'protocol': 'HTTP',
'protocol_port': 80,
'subnet': 'internal-subnet',
'lb_method': 'ROUND_ROBIN',
'admin_state_up': True,
'session_persistence': {},
},
'vip': {
'address': None,
'subnet': 'external-subnet',
'network': 'external-network',
'connection_limit': -1,
'protocol': 'HTTP',
'protocol_port': 80,
'admin_state_up': True,
},
'lb_status_timeout': 300
}
}
policy = lb_policy.LoadBalancingPolicy('test-policy', spec)
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual('senlin.policy.loadbalance-1.3', policy.type)
self.assertEqual(default_spec['properties']['pool'], policy.pool_spec)
self.assertEqual(default_spec['properties']['vip'], policy.vip_spec)
self.assertEqual(default_spec['properties']['lb_status_timeout'],
@ -132,7 +229,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
def test_loadbalancer_value(self):
spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.0',
'version': '1.3',
'properties': {
'loadbalancer': 'LB_ID',
'pool': {
@ -141,7 +238,8 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
},
'vip': {
'address': '192.168.1.100',
'subnet': 'external-subnet'
'subnet': 'external-subnet',
'network': 'external-network',
},
'health_monitor': {
'id': 'HM_ID'
@ -156,7 +254,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
policy = lb_policy.LoadBalancingPolicy('test-policy', spec)
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual('senlin.policy.loadbalance-1.0', policy.type)
self.assertEqual('senlin.policy.loadbalance-1.3', policy.type)
self.assertEqual(self.spec['properties']['pool'], policy.pool_spec)
self.assertEqual(self.spec['properties']['vip'], policy.vip_spec)
self.assertEqual(self.spec['properties']['loadbalancer'], policy.lb)
@ -210,7 +308,42 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
"be found.", str(ex))
@mock.patch.object(policy_base.Policy, 'validate')
def test_validate_loadbalancer_notfund(self, mock_validate):
def test_validate_vip_network_notfound(self, mock_validate):
# Validation must surface a clear InvalidSpec when the VIP network
# name/ID cannot be resolved by the network (Neutron) client.
policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)
policy._networkclient = self.net_driver
policy._octaviaclient = self.octavia_driver
ctx = mock.Mock(user='user1', project='project1')
# Simulate Neutron returning 404 for the network lookup.
self.net_driver.network_get = mock.Mock(
side_effect=[
exc.InternalError(code='404', message='not found')
]
)
ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True)
mock_validate.assert_called_with(ctx, True)
self.net_driver.network_get.assert_called_with('external-network')
# Error message names the offending key ('network') and value.
self.assertEqual("The specified network 'external-network' could not "
"be found.", str(ex))
@mock.patch.object(policy_base.Policy, 'validate')
def test_validate_vip_no_subnet_or_network_provided(self, mock_validate):
# Omitting both vip.subnet and vip.network is invalid: validate()
# must reject the spec up front, before any Neutron lookup.
spec = copy.deepcopy(self.spec)
del spec['properties']['vip']['subnet']
del spec['properties']['vip']['network']
policy = lb_policy.LoadBalancingPolicy('test-policy', spec)
policy._networkclient = self.net_driver
policy._octaviaclient = self.octavia_driver
ctx = mock.Mock(user='user1', project='project1')
ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True)
mock_validate.assert_called_with(ctx, True)
self.assertEqual("At least one of VIP Subnet or Network must be "
"defined.", str(ex))
@mock.patch.object(policy_base.Policy, 'validate')
def test_validate_loadbalancer_notfound(self, mock_validate):
self.spec['properties']['loadbalancer'] = "LB_ID"
policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)
policy._networkclient = self.net_driver
@ -490,7 +623,7 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
self.context = utils.dummy_context()
self.spec = {
'type': 'senlin.policy.loadbalance',
'version': '1.0',
'version': '1.3',
'properties': {
'pool': {
'protocol': 'HTTP',
@ -506,6 +639,7 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
'vip': {
'address': '192.168.1.100',
'subnet': 'test-subnet',
'network': 'test-network',
'connection_limit': 500,
'protocol': 'HTTP',
'protocol_port': 80,
@ -520,7 +654,8 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
'http_method': 'GET',
'url_path': '/index.html',
'expected_codes': '200,201,202'
}
},
'availability_zone': 'test_az',
}
}
self.lb_driver = mock.Mock()

View File

@ -63,6 +63,7 @@ senlin.policies =
senlin.policy.loadbalance-1.0 = senlin.policies.lb_policy:LoadBalancingPolicy
senlin.policy.loadbalance-1.1 = senlin.policies.lb_policy:LoadBalancingPolicy
senlin.policy.loadbalance-1.2 = senlin.policies.lb_policy:LoadBalancingPolicy
senlin.policy.loadbalance-1.3 = senlin.policies.lb_policy:LoadBalancingPolicy
senlin.policy.region_placement-1.0 = senlin.policies.region_placement:RegionPlacementPolicy
senlin.policy.zone_placement-1.0 = senlin.policies.zone_placement:ZonePlacementPolicy
senlin.policy.affinity-1.0 = senlin.policies.affinity_policy:AffinityPolicy