Dual-stack support for L3 Policy

This adds dual-stack support for L3 Policy. It leverages
the existing parameters for subnetpools and address scopes,
and adds behaviors to support the implicit workflow.

Change-Id: Idedbb3d08b09e76abdba6d1aba0f62ba53a19a99
partially-implements: blueprint address-scope-mapping
(cherry picked from commit 3ca5037402)
This commit is contained in:
Thomas Bachman 2017-04-20 17:38:44 +00:00
parent 750491a1d5
commit 7e4345d5f4
13 changed files with 1028 additions and 644 deletions

View File

@ -4,6 +4,6 @@ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-120} \
# REVISIT: Temporarily exclude gbpservice/neutron contrib tests,
# re-enable when we are able to get UT run time to less than 50 mins
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron} $LISTOPT $IDOPTION
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron/tests/unit/services} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@ -26,6 +26,7 @@ from sqlalchemy.orm import exc
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_constants)
from gbpservice.neutron.services.grouppolicy.common import utils
LOG = logging.getLogger(__name__)
@ -1048,40 +1049,48 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@staticmethod
def validate_ip_pool(ip_pool, ip_version):
    """Validate an L3 policy ip_pool string against its ip_version.

    :param ip_pool: comma-separated string of CIDR prefixes. May be
        empty/None, which is allowed so the default subnetpools
        extension can supply pools instead.
    :param ip_version: 4, 6, or 46 (46 indicates dual-stack v4+v6).
    :raises gpolicy.InvalidIpPoolVersion: a prefix's IP version is not
        permitted by ip_version.
    :raises gpolicy.InvalidIpPoolSize: a prefix has 3 or fewer addresses.
    :raises gpolicy.InvalidIpPoolPrefixLength: a prefix has length 0.
    """
    # An empty pool is allowed, as it can be used
    # with the default subnetpools extension
    if not ip_pool:
        return
    if ip_version == 46:
        valid_versions = [4, 6]
    else:
        valid_versions = [ip_version]
    ip_pool_list = utils.convert_ip_pool_string_to_list(ip_pool)
    for pool in ip_pool_list:
        # Validate each individual prefix, not the whole
        # comma-separated ip_pool string.
        attr._validate_subnet(pool)
        ip_net = netaddr.IPNetwork(pool)
        if ip_net.version not in valid_versions:
            raise gpolicy.InvalidIpPoolVersion(ip_pool=pool,
                                               version=ip_version)
        if ip_net.size <= 3:
            err_msg = "Too few available IPs in the pool."
            raise gpolicy.InvalidIpPoolSize(ip_pool=pool, err_msg=err_msg,
                                            size=ip_net.size)
        if ip_net.prefixlen == 0:
            err_msg = "Prefix length of 0 is invalid."
            raise gpolicy.InvalidIpPoolPrefixLength(
                ip_pool=pool, err_msg=err_msg,
                prefixlen=ip_net.prefixlen)
@staticmethod
def validate_subnet_prefix_length(ip_version, new_prefix_length,
                                  ip_pool=None):
    """Validate subnet_prefix_length for an L3 policy.

    subnet_prefix_length only applies to IPv4 prefixes (implicitly
    created IPv6 subnets use /64), so only IPv4 bounds and the IPv4
    prefixes of ip_pool are checked.

    :param ip_version: 4, 6, or 46 (dual-stack).
    :param new_prefix_length: requested default subnet prefix length.
    :param ip_pool: optional comma-separated string of CIDR prefixes.
    :raises gpolicy.InvalidDefaultSubnetPrefixLength: prefix length is
        out of the valid IPv4 range (2..MAX_IPV4_SUBNET_PREFIX_LENGTH).
    :raises gpolicy.SubnetPrefixLengthExceedsIpPool: an IPv4 pool
        prefix is smaller than the requested subnet size.
    """
    if (ip_version == 4 or ip_version == 46) and (
            (new_prefix_length > MAX_IPV4_SUBNET_PREFIX_LENGTH) or (
                new_prefix_length < 2)):
        raise gpolicy.InvalidDefaultSubnetPrefixLength(
            length=new_prefix_length, protocol=ip_version)
    if ip_pool is not None:
        ip_pool_list = utils.convert_ip_pool_string_to_list(ip_pool)
        for pool in ip_pool_list:
            # Check if subnet_prefix_length is smaller
            # than size of the ip_pool's subnet.
            ip_net = netaddr.IPNetwork(pool)
            if ip_net.version == 4:
                if ip_net.prefixlen > new_prefix_length:
                    raise gpolicy.SubnetPrefixLengthExceedsIpPool(
                        ip_pool=pool, subnet_size=new_prefix_length)
@log.log_method_call
def create_policy_target(self, context, policy_target):

View File

@ -9,9 +9,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron.common import exceptions as nexc
from neutron import context as n_context
from neutron.db import model_base
from neutron.db import models_v2
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_utils import uuidutils
@ -21,6 +24,23 @@ from sqlalchemy import orm
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy.common import exceptions
from gbpservice.neutron.services.grouppolicy.common import utils
def get_current_context():
    """Walk up the call stack looking for a neutron request context.

    Returns the first n_context.Context instance found among any
    caller frame's local variables, or None if none can be found
    (or frame inspection fails for any reason).
    """
    depth = 1
    try:
        while True:
            frame_locals = sys._getframe(depth).f_locals
            for value in frame_locals.values():
                if isinstance(value, n_context.Context):
                    return value
            depth += 1
    except Exception:
        # sys._getframe raises ValueError once we walk off the top of
        # the stack; any other failure also means "no context found".
        return None
LOG = logging.getLogger(__name__)
@ -145,13 +165,20 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
res['subnets'] = [subnet.subnet_id for subnet in ptg.subnets]
return self._fields(res, fields)
def _get_subnetpools(self, id_list):
    # Look up the SubnetPool DB records for the given list of ids.
    # NOTE(review): recovers the request context by walking the call
    # stack (get_current_context) and elevates it, presumably so
    # subnetpools owned by other tenants can be read -- confirm that
    # callers always have a Context on the stack, since
    # get_current_context() can return None.
    context = get_current_context().elevated()
    with context.session.begin(subtransactions=True):
        filters = {'id': id_list}
        return self._get_collection_query(
            context, models_v2.SubnetPool, filters=filters).all()
def _make_l2_policy_dict(self, l2p, fields=None):
    """Return the base L2 policy dict extended with its network id."""
    policy_dict = super(GroupPolicyMappingDbPlugin,
                        self)._make_l2_policy_dict(l2p)
    policy_dict['network_id'] = l2p.network_id
    return self._fields(policy_dict, fields)
def _make_l3_policy_dict(self, l3p, fields=None):
def _make_l3_policy_dict(self, l3p, fields=None, ip_pool=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_l3_policy_dict(l3p)
res['routers'] = [router.router_id for router in l3p.routers]
@ -159,6 +186,15 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
res['address_scope_v6_id'] = l3p.address_scope_v6_id
res['subnetpools_v4'] = [sp.subnetpool_id for sp in l3p.subnetpools_v4]
res['subnetpools_v6'] = [sp.subnetpool_id for sp in l3p.subnetpools_v6]
if ip_pool:
res['ip_pool'] = ip_pool
subnetpools = self._get_subnetpools(res['subnetpools_v4'] +
res['subnetpools_v6'])
pool_list = [prefix['cidr'] for pool in subnetpools
for prefix in pool['prefixes']]
if pool_list:
res['ip_pool'] = utils.convert_ip_pool_list_to_string(
pool_list)
return self._fields(res, fields)
def _make_external_segment_dict(self, es, fields=None):
@ -603,7 +639,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
self._set_ess_for_l3p(context, l3p_db,
l3p['external_segments'])
context.session.add(l3p_db)
return self._make_l3_policy_dict(l3p_db)
return self._make_l3_policy_dict(l3p_db, ip_pool=l3p['ip_pool'])
@log.log_method_call
def update_l3_policy(self, context, l3_policy_id, l3_policy):

View File

@ -85,7 +85,7 @@ EXTENDED_ATTRIBUTES_2_0 = {
},
gp.L3_POLICIES: {
'proxy_ip_pool': {'allow_post': True, 'allow_put': False,
'validate': {'type:subnet': None},
'validate': {'type:string_or_none': None},
'default': PROXY_CONF.default_proxy_ip_pool,
'is_visible': True},
'proxy_subnet_prefix_length': {

View File

@ -39,6 +39,17 @@ extensions.append_api_extensions_path(gbpservice.neutron.extensions.__path__)
LOG = logging.getLogger(__name__)
opts = [
cfg.StrOpt('default_ip_pool',
default='10.0.0.0/8',
help=_("IP pool for implicitly created default L3 policies, "
"from which subnets are allocated for policy target "
"groups.")),
]
cfg.CONF.register_opts(opts, "group_policy_group")
GBP_CONF = cfg.CONF.group_policy_group
# Group Policy Exceptions
class GbpResourceNotFound(nexc.NotFound):
@ -572,15 +583,19 @@ RESOURCE_ATTRIBUTE_MAP = {
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_int,
'validate': {'type:values': [4, 6]},
# The value 46 is used to indicate dual-stack
# (IPv4 and IPv6)
'validate': {'type:values': [4, 6, 46]},
'default': 4, 'is_visible': True},
'ip_pool': {'allow_post': True, 'allow_put': False,
'validate': {'type:subnet': None},
'default': '10.0.0.0/8', 'is_visible': True},
'validate': {'type:string_or_none': None},
'default': GBP_CONF.default_ip_pool, 'is_visible': True},
'subnet_prefix_length': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_to_int,
# for ipv4 legal values are 2 to 30
# for ipv6 legal values are 2 to 127
# This parameter only applies to ipv4
# prefixes. For IPv4 legal values are
# 2 to 30. For ipv6, this parameter
# is ignored
'default': 24, 'is_visible': True},
'l2_policies': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_list': None},

View File

@ -0,0 +1,32 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
def convert_ip_pool_list_to_string(ip_pool):
    """Join a list of CIDR prefix strings into a comma-separated string.

    :param ip_pool: list of prefix strings, e.g. ['10.0.0.0/8'].
    :returns: comma-plus-space separated string of the prefixes.
    :raises ValueError: if ip_pool is not a list.
    """
    # isinstance rather than an exact type() check: accepts list
    # subclasses while still rejecting strings/tuples/etc.
    if not isinstance(ip_pool, list):
        msg = ("The type of %(ip_pool)s is not a list" %
               {'ip_pool': ip_pool})
        raise ValueError(msg)
    return ', '.join(ip_pool)
def convert_ip_pool_string_to_list(ip_pool_string):
    """Split a comma-separated prefix string into a list of prefixes.

    Empty or None input yields an empty list; non-string truthy
    input raises ValueError.
    """
    if ip_pool_string and not isinstance(ip_pool_string, six.string_types):
        msg = ("The type of %(ip_pool_string)s is not a string "
               "or unicode" % {'ip_pool_string': ip_pool_string})
        raise ValueError(msg)
    if not ip_pool_string:
        return []
    return [prefix.strip() for prefix in ip_pool_string.split(',')]

View File

@ -11,6 +11,7 @@
# under the License.
import hashlib
import netaddr
import re
from aim import aim_manager
@ -26,7 +27,6 @@ from neutron.common import constants as n_constants
from neutron.common import exceptions as n_exc
from neutron import context as n_context
from neutron.db import models_v2
from neutron import manager
from neutron import policy
from oslo_concurrency import lockutils
from oslo_config import cfg
@ -47,6 +47,7 @@ from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_const)
from gbpservice.neutron.services.grouppolicy.common import constants as g_const
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.common import utils
from gbpservice.neutron.services.grouppolicy.drivers import (
neutron_resources as nrd)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
@ -87,6 +88,8 @@ COMMON_TENANT_AIM_RESOURCES = [aim_resource.Contract.__name__,
aim_resource.ContractSubject.__name__,
aim_resource.Filter.__name__,
aim_resource.FilterEntry.__name__]
# REVISIT: override add_router_interface L3 API check for now
NO_VALIDATE = cisco_apic_l3.OVERRIDE_NETWORK_ROUTING_TOPOLOGY_VALIDATION
# REVISIT: Auto-PTG is currently config driven to align with the
# config driven behavior of the older driver but is slated for
@ -115,16 +118,14 @@ opts = [
cfg.CONF.register_opts(opts, "aim_mapping")
class SimultaneousV4V6AddressScopesNotSupportedOnAimDriver(
exc.GroupPolicyBadRequest):
message = _("Both v4 and v6 address_scopes cannot be set "
"simultaneously for a l3_policy.")
class NoValidAddressScope(exc.GroupPolicyBadRequest):
message = _("No address scope was either provided, could be "
"determined, or could be created for a l3_policy.")
class SimultaneousV4V6SubnetpoolsNotSupportedOnAimDriver(
exc.GroupPolicyBadRequest):
message = _("Both v4 and v6 subnetpools cannot be set "
"simultaneously for a l3_policy.")
class InvalidVrfForDualStackAddressScopes(exc.GroupPolicyBadRequest):
message = _("User-specified address scopes for both address families, "
"(IPv4 and IPv6) must use the same ACI VRF.")
class InconsistentAddressScopeSubnetpool(exc.GroupPolicyBadRequest):
@ -189,9 +190,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
@property
def aim_mech_driver(self):
if not self._apic_aim_mech_driver:
ml2plus_plugin = manager.NeutronManager.get_plugin()
self._apic_aim_mech_driver = (
ml2plus_plugin.mechanism_manager.mech_drivers['apic_aim'].obj)
self._core_plugin.mechanism_manager.mech_drivers[
'apic_aim'].obj)
return self._apic_aim_mech_driver
@property
@ -239,74 +240,119 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def aim_display_name(self, name):
return aim_utils.sanitize_display_name(name)
# TODO(tbachman): remove once non-isomorphic address scopes
# are supported
def _validate_address_scopes(self, context, ip_dict):
    # Reject dual-stack L3 policies whose v4 and v6 address scopes
    # map to different ACI VRFs (see TODO above: the driver currently
    # requires isomorphic scopes).
    v4_scope_id = ip_dict.get(4, {}).get('address_scope')
    v6_scope_id = ip_dict.get(6, {}).get('address_scope')
    # Only validate when both address families have a scope set.
    if v4_scope_id and v6_scope_id:
        v4_scope = self._get_address_scope(
            context._plugin_context, v4_scope_id)
        v6_scope = self._get_address_scope(
            context._plugin_context, v6_scope_id)
        # Compare the VRF distinguished names from the scopes' APIC
        # mapping extension attributes.
        if (v4_scope[cisco_apic.DIST_NAMES][cisco_apic.VRF] !=
                v6_scope[cisco_apic.DIST_NAMES][cisco_apic.VRF]):
            raise InvalidVrfForDualStackAddressScopes()
@log.log_method_call
def create_l3_policy_precommit(self, context):
l3p = context.current
self._check_l3policy_ext_segment(context, l3p)
l3p_req = context.current
self._check_l3policy_ext_segment(context, l3p_req)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, l3p['id'])
if l3p['address_scope_v4_id'] and l3p['address_scope_v6_id']:
raise SimultaneousV4V6AddressScopesNotSupportedOnAimDriver()
if l3p['subnetpools_v4'] and l3p['subnetpools_v6']:
raise SimultaneousV4V6SubnetpoolsNotSupportedOnAimDriver()
mix1 = l3p['address_scope_v4_id'] is not None and l3p['subnetpools_v6']
mix2 = l3p['address_scope_v6_id'] is not None and l3p['subnetpools_v4']
if mix1 or mix2:
raise InconsistentAddressScopeSubnetpool()
context._plugin_context, l3p_req['id'])
# The ip_version tells us what should be supported
ip_version = l3p_req['ip_version']
l3p_db['ip_version'] = ip_version
# First determine the address scope for the address
# families specified in ip_version. We look first at
# explicitly passed address scopes, then the address
# scopes of the subnetpools, then the address scopes
# of default defined subnetpool (via that extension),
# or just create one if none are present
ip_dict = {}
ascp = None
if l3p['address_scope_v6_id'] or l3p['subnetpools_v6']:
l3p_db['ip_version'] = 6
context.current['ip_version'] = 6
ascp = 'address_scope_v6_id'
elif l3p['address_scope_v4_id'] or l3p['subnetpools_v4']:
# Since we are not supporting dual stack yet, if both v4 and
# v6 address_scopes are set, the v4 address_scope will be used
# to set the l3p ip_version
l3p_db['ip_version'] = 4
ascp = 'address_scope_v4_id'
if not ascp:
# Explicit address_scope has not been set
ascp = 'address_scope_v4_id' if l3p_db['ip_version'] == 4 else (
'address_scope_v6_id')
if not l3p[ascp]:
# REVISIT: For dual stack.
# This logic assumes either 4 or 6 but not both
self._use_implicit_address_scope(context)
l3p_db[ascp] = l3p[ascp]
else:
if ip_version == 4 or ip_version == 46:
ip_dict[4] = {'address_scope_key': 'address_scope_v4_id',
'subnetpools_key': 'subnetpools_v4'}
if ip_version == 6 or ip_version == 46:
ip_dict[6] = {'address_scope_key': 'address_scope_v6_id',
'subnetpools_key': 'subnetpools_v6'}
# save VRF DN from v4 family address scope, if implicitly created,
# as we will need to reuse it if we also implicitly create a v6
# address scopes
saved_scope_vrf = None
for family in ip_dict.keys():
explicit_scope = l3p_req[ip_dict[family]['address_scope_key']]
explicit_pools = l3p_req[ip_dict[family]['subnetpools_key']]
default_pool = self._core_plugin.get_default_subnetpool(
context._plugin_context.elevated(), ip_version=family)
ip_pool = utils.convert_ip_pool_string_to_list(l3p_req['ip_pool'])
family_prefixes = [prefix for prefix in ip_pool
if netaddr.IPNetwork(prefix).version == family]
if explicit_scope:
ascp = ip_dict[family]['address_scope'] = explicit_scope
elif explicit_pools:
ascp, _ = self._check_subnetpools_for_same_scope(context,
explicit_pools, None)
ip_dict[family]['address_scope'] = ascp
l3p_db[ip_dict[family]['address_scope_key']] = ascp
elif family_prefixes:
kwargs = {}
if saved_scope_vrf:
kwargs.update({cisco_apic.DIST_NAMES: saved_scope_vrf})
address_scope = self._use_implicit_address_scope(context,
ip_version=family, **kwargs)
ip_dict[family]['address_scope'] = (
l3p_req[ip_dict[family]['address_scope_key']])
saved_scope_vrf = address_scope[
cisco_apic.DIST_NAMES]
elif default_pool and default_pool.get('address_scope_id'):
ip_dict[family]['address_scope'] = (
default_pool['address_scope_id'])
else:
raise NoValidAddressScope()
if explicit_scope or explicit_pools:
# In the case of explicitly provided address_scope or
# subnetpools, set shared flag of L3P to the address_scope
ascp_db = self._get_address_scope(
context._plugin_context, ascp)
l3p_db['shared'] = ascp_db['shared']
context.current['shared'] = l3p_db['shared']
if not explicit_pools and family_prefixes:
# for pools that need to be created, we
# want to use subnet_prefix_length as the
# default for v4 subnets, and /64 for v6
# subnets. If a subnet_prefix_length wasn't
# provided, we use the implict default
if family == 4:
default_prefixlen = l3p_req['subnet_prefix_length'] or 24
else:
default_prefixlen = 64
if family_prefixes:
self._use_implicit_subnetpool(context,
address_scope_id=ip_dict[family]['address_scope'],
ip_version=family, prefixes=family_prefixes,
default_prefixlen=default_prefixlen)
elif not explicit_pools and default_pool:
l3p_req[ip_dict[family]['subnetpools_key']] = [
default_pool['id']]
context._plugin._add_subnetpools_to_l3_policy(
context._plugin_context, l3p_db, [default_pool['id']],
ip_version=family)
# TODO(Sumit): check that l3p['ip_pool'] does not overlap with an
# existing subnetpool associated with the explicit address_scope
pass
self._configure_l3p_for_multiple_subnetpools(context,
l3p_db, ip_version=family,
address_scope_id=ip_dict[family]['address_scope'])
if l3p[ascp]:
# In the case of explicitly provided address_scope, set shared
# flag of L3P to that of the explicit address_scope
ascp_db = self._get_address_scope(
context._plugin_context, l3p[ascp])
l3p_db['shared'] = ascp_db['shared']
context.current['shared'] = l3p_db['shared']
subpool = 'subnetpools_v4' if l3p_db['ip_version'] == 4 else (
'subnetpools_v6')
if not l3p[subpool]:
# REVISIT: For dual stack.
# This logic assumes either 4 or 6 but not both
self._use_implicit_subnetpool(
context, address_scope_id=l3p_db[ascp],
ip_version=l3p_db['ip_version'])
else:
self._configure_l3p_for_multiple_subnetpools(context, l3p_db)
# In the case of explicitly provided subnetpool(s) set shared
# flag of L3P to that of the address_scope associated with the
# subnetpool(s)
ascp_db = self._get_address_scope(
context._plugin_context, l3p_db[ascp])
l3p_db['shared'] = ascp_db['shared']
context.current['shared'] = l3p_db['shared']
self._validate_address_scopes(context, ip_dict)
# REVISIT: Check if the following constraint still holds
if len(l3p['routers']) > 1:
if len(l3p_req['routers']) > 1:
raise exc.L3PolicyMultipleRoutersNotSupported()
# REVISIT: Validate non overlapping IPs in the same tenant.
# Currently this validation is not required for the
@ -314,24 +360,19 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
# driver inheriting from this driver, we are okay
# without the check.
self._reject_invalid_router_access(context)
if not l3p['routers']:
if not l3p_req['routers']:
self._use_implicit_router(context)
if not context.current['external_segments']:
self._use_implicit_external_segment(context)
external_segments = context.current['external_segments']
if external_segments:
self._plug_l3p_routers_to_ext_segment(context, l3p,
self._plug_l3p_routers_to_ext_segment(context, l3p_req,
external_segments)
self._create_implicit_contracts(context, l3p)
self._create_implicit_contracts(context, l3p_req)
@log.log_method_call
def update_l3_policy_precommit(self, context):
self._reject_shared_update(context, 'l3_policy')
if (context.current['subnetpools_v4'] or
context.original['subnetpools_v4']) and (
context.current['subnetpools_v6'] or
context.original['subnetpools_v6']):
raise SimultaneousV4V6SubnetpoolsNotSupportedOnAimDriver()
if context.current['routers'] != context.original['routers']:
raise exc.L3PolicyRoutersUpdateNotSupported()
# Currently there is no support for router update in l3p update.
@ -340,44 +381,49 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
self._validate_in_use_by_nsp(context)
if ((context.current['subnetpools_v4'] and (
context.current['subnetpools_v4'] !=
context.original['subnetpools_v4'])) or (
context.current['subnetpools_v6'] and (
context.current['subnetpools_v6'] !=
context.original['subnetpools_v6']))):
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['id'])
self._configure_l3p_for_multiple_subnetpools(context, l3p_db)
removedv4 = list(set(context.original['subnetpools_v4']) -
set(context.current['subnetpools_v4']))
removedv6 = list(set(context.original['subnetpools_v6']) -
set(context.current['subnetpools_v6']))
for sp_id in (removedv4 + removedv6):
if sp_id in self._get_in_use_subnetpools_for_l3p(context):
raise IncorrectSubnetpoolUpdate(
subnetpool_id=sp_id, l3p_id=context.current['id'])
# If an implicitly created subnetpool is being disassocaited
# we try to delete it
self._cleanup_subnetpool(context._plugin_context, sp_id)
ip_dict = {4: {'address_scope_key': 'address_scope_v4_id',
'subnetpools_key': 'subnetpools_v4'},
6: {'address_scope_key': 'address_scope_v6_id',
'subnetpools_key': 'subnetpools_v6'}}
l3p_orig = context.original
l3p_curr = context.current
for family in ip_dict.keys():
ip_info = ip_dict[family]
if (l3p_curr[ip_info['subnetpools_key']] and (
l3p_curr[ip_info['subnetpools_key']] !=
l3p_orig[ip_info['subnetpools_key']])):
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, l3p_curr['id'])
self._configure_l3p_for_multiple_subnetpools(context,
l3p_db, ip_version=family,
address_scope_id=l3p_db[ip_info['address_scope_key']])
removed = list(set(l3p_orig[ip_info['subnetpools_key']]) -
set(l3p_curr[ip_info['subnetpools_key']]))
for sp_id in removed:
if sp_id in self._get_in_use_subnetpools_for_l3p(context):
raise IncorrectSubnetpoolUpdate(
subnetpool_id=sp_id, l3p_id=l3p_curr['id'])
# If an implicitly created subnetpool is being
# disassocaited we try to delete it
self._cleanup_subnetpool(context._plugin_context, sp_id)
# TODO(Sumit): For extra safety add validation for address_scope change
self._check_l3policy_ext_segment(context, context.current)
old_segment_dict = context.original['external_segments']
new_segment_dict = context.current['external_segments']
if (context.current['external_segments'] !=
context.original['external_segments']):
self._check_l3policy_ext_segment(context, l3p_curr)
old_segment_dict = l3p_orig['external_segments']
new_segment_dict = l3p_curr['external_segments']
if (l3p_curr['external_segments'] !=
l3p_orig['external_segments']):
new_segments = set(new_segment_dict.keys())
old_segments = set(old_segment_dict.keys())
removed = old_segments - new_segments
self._unplug_l3p_routers_from_ext_segment(context,
context.current,
l3p_curr,
removed)
added_dict = {s: new_segment_dict[s]
for s in (new_segments - old_segments)}
if added_dict:
self._plug_l3p_routers_to_ext_segment(context,
context.current,
l3p_curr,
added_dict)
@log.log_method_call
@ -426,7 +472,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
ascp_id = l3p_db[ascp]
ascope = self._get_address_scope(
context._plugin_context, ascp_id)
vrf_dn = ascope['apic:distinguished_names']['VRF']
vrf_dn = ascope[cisco_apic.DIST_NAMES][cisco_apic.VRF]
aim_vrf = self._get_vrf_by_dn(context, vrf_dn)
mapped_aim_resources.append(aim_vrf)
@ -463,7 +509,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
l2p = context.current
net = self._get_network(context._plugin_context,
l2p['network_id'])
default_epg_dn = net['apic:distinguished_names']['EndpointGroup']
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
self._configure_contracts_for_default_epg(
context, l3p_db, default_epg_dn)
if self.create_auto_ptg:
@ -519,7 +565,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
if net:
context.current['status'] = net['status']
default_epg_dn = net['apic:distinguished_names']['EndpointGroup']
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, l2p_db['l3_policy_id'])
aim_resources = self._get_implicit_contracts_for_default_epg(
@ -1127,55 +1173,57 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
if context.original.get('shared') != context.current.get('shared'):
raise SharedAttributeUpdateNotSupported(type=type)
def _configure_l3p_for_multiple_subnetpools(self, context, l3p_db):
ascp = 'address_scope_v4_id' if l3p_db['ip_version'] == 4 else (
'address_scope_v6_id')
subpool = 'subnetpools_v4' if l3p_db['ip_version'] == 4 else (
'subnetpools_v6')
if len(l3p_db[subpool]) == 1:
sp_id = l3p_db[subpool][0]['subnetpool_id']
# admin context to retrieve subnetpools from a different tenant
def _check_subnetpools_for_same_scope(self, context, subnetpools,
ascp, prefixes=None):
sp_ascp = None
for sp_id in subnetpools:
sp = self._get_subnetpool(
# admin context to retrieve subnetpools from
# other tenants
context._plugin_context.elevated(), sp_id)
if not sp['address_scope_id']:
raise NoAddressScopeForSubnetpool()
if len(sp['prefixes']) == 1:
l3p_db['ip_pool'] = sp['prefixes'][0]
l3p_db[ascp] = sp['address_scope_id']
l3p_db['subnet_prefix_length'] = int(sp['default_prefixlen'])
else:
sp_ascp = None
for l3p_subpool_assoc in l3p_db[subpool]:
sp_id = l3p_subpool_assoc['subnetpool_id']
# REVISIT: For dual stack.
# This logic assumes either 4 or 6 but not both
sp = self._get_subnetpool(
# admin context to retrieve subnetpools from
# other tenants
context._plugin_context.elevated(), sp_id)
if not sp['address_scope_id']:
raise NoAddressScopeForSubnetpool()
if not sp_ascp:
if l3p_db[ascp]:
# This is the case where the address_scope
# was explicitly set for the l3p and we need to
# check if it conflicts with the address_scope of
# the first subnetpool
if sp['address_scope_id'] != l3p_db[ascp]:
raise InconsistentAddressScopeSubnetpool()
else:
# No address_scope was explicitly set for the l3p,
# so set it to that of the first subnetpool
l3p_db[ascp] = sp['address_scope_id']
sp_ascp = sp['address_scope_id']
elif sp_ascp != sp['address_scope_id']:
# all subnetpools do not have the same address_scope
raise InconsistentAddressScopeSubnetpool()
LOG.info(_LI("Since multiple subnetpools are configured for "
"this l3_policy, it's ip_pool and "
"subnet_prefix_length attributes will be unset."))
l3p_db['ip_pool'] = None
l3p_db['subnet_prefix_length'] = None
if not sp_ascp:
if ascp:
# This is the case where the address_scope
# was explicitly set for the l3p and we need to
# check if it conflicts with the address_scope of
# the first subnetpool
if sp['address_scope_id'] != ascp:
raise InconsistentAddressScopeSubnetpool()
else:
# No address_scope was explicitly set for the l3p,
# so set it to that of the first subnetpool
ascp = sp['address_scope_id']
sp_ascp = sp['address_scope_id']
elif sp_ascp != sp['address_scope_id']:
# all subnetpools do not have the same address_scope
raise InconsistentAddressScopeSubnetpool()
# aggregate subnetpool prefixes
sp_prefixlist = [prefix for prefix in sp['prefixes']]
if prefixes:
stripped = [prefix.strip() for prefix in prefixes.split(',')]
prefixes = ', '.join(stripped + sp_prefixlist)
else:
prefixes = ', '.join(sp_prefixlist)
return ascp, prefixes
def _configure_l3p_for_multiple_subnetpools(self, context,
                                            l3p_db, ip_version=4,
                                            address_scope_id=None):
    """Validate the l3p's subnetpools for one address family and
    update the l3p DB record's address scope, ip_pool, and
    subnet_prefix_length accordingly.
    """
    l3p_req = context.current
    if ip_version == 4:
        ascp_id_key = 'address_scope_v4_id'
        subpool_ids_key = 'subnetpools_v4'
    else:
        ascp_id_key = 'address_scope_v6_id'
        subpool_ids_key = 'subnetpools_v6'
    # admin context to retrieve subnetpools from a different tenant
    address_scope_id, prefixes = self._check_subnetpools_for_same_scope(
        context, l3p_req[subpool_ids_key], address_scope_id,
        prefixes=l3p_db['ip_pool'])
    l3p_db[ascp_id_key] = address_scope_id
    l3p_db['ip_pool'] = prefixes
    if l3p_req['subnet_prefix_length']:
        l3p_db['subnet_prefix_length'] = l3p_req['subnet_prefix_length']
def _aim_tenant_name(self, session, tenant_id, aim_resource_class=None,
gbp_resource=None, gbp_obj=None):
@ -2247,7 +2295,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
'network_id': subnet['network_id'],
'fixed_ips': [{'subnet_id': subnet['id']}],
'device_id': '',
'device_owner': '',
'device_owner': n_constants.DEVICE_OWNER_ROUTER_INTF,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'name': '%s-%s' % (router_id, subnet['id']),
'admin_state_up': True}
@ -2259,7 +2307,8 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
'interface port in subnet '
'%(subnet)s'),
{'subnet': subnet['id']})
interface_info = {'port_id': intf_port['id']}
interface_info = {'port_id': intf_port['id'],
NO_VALIDATE: True}
try:
self._add_router_interface(plugin_context, router_id,
interface_info)
@ -2276,6 +2325,18 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
self._plug_router_to_subnet(plugin_context, subnet['id'],
router_id)
def _plug_router_to_subnet(self, plugin_context, subnet_id, router_id):
    """Attach the router (if one is given) to the subnet, skipping
    the routing-topology validation override check.
    """
    # Nothing to do when no router id was supplied.
    if not router_id:
        return
    interface_info = {'subnet_id': subnet_id,
                      NO_VALIDATE: True}
    try:
        self._add_router_interface(plugin_context, router_id,
                                   interface_info)
    except n_exc.BadRequest as e:
        LOG.exception(_LE("Adding subnet to router failed, exception:"
                          "%s"), e)
        raise exc.GroupPolicyInternalError()
def _detach_router_from_subnets(self, plugin_context, router_id, sn_ids):
for subnet_id in sn_ids:
# Use admin context because router and subnet may be in
@ -2363,7 +2424,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
network_id = l2p_db['network_id']
admin_context = self._get_admin_context_reuse_session(session)
net = self._get_network(admin_context, network_id)
default_epg_dn = net['apic:distinguished_names']['EndpointGroup']
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
default_epg_name = self._get_epg_name_from_dn(
admin_context, default_epg_dn)
return default_epg_name

View File

@ -58,6 +58,7 @@ from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy.common import constants as g_const
from gbpservice.neutron.services.grouppolicy.common import exceptions as gpexc
from gbpservice.neutron.services.grouppolicy.common import utils
from gbpservice.neutron.services.grouppolicy.drivers import (
resource_mapping as api)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
@ -823,7 +824,8 @@ class ApicMappingDriver(api.ResourceMappingDriver,
self._tenant_by_sharing_policy(l3p))
details['vrf_name'] = self.apic_manager.apic.fvCtx.name(
str(self.name_mapper.l3_policy(context, l3p)))
details['vrf_subnets'] = [l3p['ip_pool']]
details['vrf_subnets'] = utils.convert_ip_pool_string_to_list(
l3p['ip_pool'])
if l3p.get('proxy_ip_pool'):
details['vrf_subnets'].append(l3p['proxy_ip_pool'])

View File

@ -28,6 +28,7 @@ from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
LOG = logging.getLogger(__name__)
GBP_CONF = cfg.CONF.group_policy_group
opts = [
cfg.StrOpt('default_l3_policy_name',
@ -35,10 +36,10 @@ opts = [
help=_("Name of each tenant's default L3 policy.")),
cfg.IntOpt('default_ip_version',
default=4,
help=_("IP version (4 or 6) for implicitly created default L3 "
"policies.")),
help=_("IP version (4, 6 or 46) for implicitly created default "
"L3 policies.")),
cfg.StrOpt('default_ip_pool',
default='10.0.0.0/8',
default=GBP_CONF.default_ip_pool,
help=_("IP pool for implicitly created default L3 policies, "
"from which subnets are allocated for policy target "
"groups.")),

View File

@ -16,6 +16,7 @@ import operator
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as k_client
from neutron._i18n import _LE
from neutron._i18n import _LI
from neutron._i18n import _LW
from neutron.api.v2 import attributes
from neutron.common import constants as const
@ -42,6 +43,7 @@ from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.common import utils as gbp_utils
from gbpservice.neutron.services.grouppolicy.drivers import nsp_manager
@ -54,10 +56,23 @@ opts = [
default=[],
help=_("List of DNS nameservers to be configured for the "
"PTG subnets")),
cfg.StrOpt('default_ipv6_ra_mode',
default=None,
help=_("default IPv6 Router Advertisement mode for subnets "
"created implicitly for L3 policies. Valid values are "
"'slaac', 'dhcpv6-stateful', and 'dhcpv6-stateless'")),
cfg.StrOpt('default_ipv6_address_mode',
default=None,
help=_("default IPv6 address assignment mode for subnets "
"created implicitly for L3 policies. Valid values are "
"'slaac', 'dhcpv6-stateful', and 'dhcpv6-stateless'")),
]
cfg.CONF.register_opts(opts, "resource_mapping")
# something to shorten the config family name
MAPPING_CFG = cfg.CONF.resource_mapping
class OwnedPort(model_base.BASEV2):
"""A Port owned by the resource_mapping driver."""
@ -267,11 +282,12 @@ class ImplicitResourceOperations(local_api.LocalAPI,
self._mark_address_scope_owned(context._plugin_context.session, as_id)
return address_scope
def _use_implicit_address_scope(self, context):
def _use_implicit_address_scope(self, context, ip_version=4, **kwargs):
address_scope = self._create_implicit_address_scope(
context, name='l3p_' + context.current['name'])
context.set_address_scope_id(address_scope['id'],
context.current['ip_version'])
context, name='l3p_' + context.current['name'] +
'_' + str(ip_version), ip_version=ip_version, **kwargs)
context.set_address_scope_id(address_scope['id'], ip_version)
return address_scope
def _cleanup_address_scope(self, plugin_context, address_scope_id):
if self._address_scope_is_owned(plugin_context.session,
@ -292,7 +308,8 @@ class ImplicitResourceOperations(local_api.LocalAPI,
'name': context.current['name'], 'ip_version':
context.current['ip_version'],
'default_prefixlen': context.current['subnet_prefix_length'],
'prefixes': [context.current['ip_pool']],
'prefixes': gbp_utils.convert_ip_pool_string_to_list(
context.current['ip_pool']),
'shared': context.current.get('shared', False),
# Per current understanding, is_default is used for
# auto_allocation and is a per-tenant setting.
@ -304,10 +321,11 @@ class ImplicitResourceOperations(local_api.LocalAPI,
self._mark_subnetpool_owned(context._plugin_context.session, sp_id)
return subnetpool
def _use_implicit_subnetpool(self, context, address_scope_id, ip_version):
def _use_implicit_subnetpool(self, context, address_scope_id,
ip_version=4, **kwargs):
subnetpool = self._create_implicit_subnetpool(
context, name='l3p_' + context.current['name'],
address_scope_id=address_scope_id)
address_scope_id=address_scope_id, ip_version=ip_version, **kwargs)
context.add_subnetpool(subnetpool_id=subnetpool['id'],
ip_version=ip_version)
@ -360,7 +378,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': (
cfg.CONF.resource_mapping.dns_nameservers or
MAPPING_CFG.dns_nameservers or
attributes.ATTR_NOT_SPECIFIED),
'host_routes': attributes.ATTR_NOT_SPECIFIED}
attrs.update(subnet_specifics)
@ -448,10 +466,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
# REVISIT(rkukura): The folowing is a temporary allocation
# algorithm that should be replaced with use of a neutron
# subnet pool.
pool = netaddr.IPSet(
iterable=[l3p['proxy_ip_pool'] if is_proxy else
l3p['ip_pool']])
pool = netaddr.IPSet(gbp_utils.convert_ip_pool_string_to_list(
l3p['proxy_ip_pool']) if is_proxy else
gbp_utils.convert_ip_pool_string_to_list(l3p['ip_pool']))
prefixlen = prefix_len or (
l3p['proxy_subnet_prefix_length'] if is_proxy
else l3p['subnet_prefix_length'])
@ -533,69 +550,75 @@ class ImplicitResourceOperations(local_api.LocalAPI,
l3p_id = l2p['l3_policy_id']
l3p_db = context._plugin.get_l3_policy(context._plugin_context, l3p_id)
# Only allocate from subnetpools that belong to this tenant
filters = {'tenant_id': [context.current['tenant_id']]}
# REVISIT: For dual stack
# Current assumption is that either v4 or v6 subnet needs to be
# allocated, but not both
ip_dict = {}
if l3p_db['address_scope_v4_id']:
filters['id'] = l3p_db['subnetpools_v4']
ip_version = 4
else:
filters['id'] = l3p_db['subnetpools_v6']
ip_version = 6
# All relevant subnetpools owned by this tenant
candidate_subpools = self._get_subnetpools(
context._plugin_context, filters) or []
del filters['tenant_id']
filters['shared'] = [True]
# All relevant shared subnetpools
shared_subpools = self._get_subnetpools(
context._plugin_context, filters) or []
# Union of the above two lists of subnetpools
candidate_subpools = {x['id']: x for x in candidate_subpools +
shared_subpools}.values()
subnet = None
for pool in candidate_subpools:
try:
attrs = {'tenant_id': context.current['tenant_id'],
'name': 'ptg_' + context.current['name'],
'network_id': l2p['network_id'],
'ip_version': ip_version,
'subnetpool_id': pool['id'],
'cidr': attributes.ATTR_NOT_SPECIFIED,
'prefixlen': attributes.ATTR_NOT_SPECIFIED,
'enable_dhcp': True,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': (
cfg.CONF.resource_mapping.dns_nameservers or
attributes.ATTR_NOT_SPECIFIED),
'host_routes': attributes.ATTR_NOT_SPECIFIED}
if ip_version == 6:
if 'ipv6_ra_mode' not in subnet_specifics:
subnet_specifics['ipv6_ra_mode'] = (
attributes.ATTR_NOT_SPECIFIED)
if 'ipv6_address_mode' not in subnet_specifics:
subnet_specifics['ipv6_address_mode'] = (
attributes.ATTR_NOT_SPECIFIED)
attrs.update(subnet_specifics)
subnet = self._create_subnet(context._plugin_context, attrs)
self._mark_subnet_owned(context._plugin_context.session,
subnet['id'])
LOG.debug("Allocated subnet %(sub)s from subnetpool: %(sp)s.",
{'sub': subnet['id'], 'sp': pool['id']})
break
except Exception as e:
LOG.exception(_LE("Allocating subnet from subnetpool %(sp)s "
"failed. Allocation will be attempted "
"from any other configured "
"subnetpool(s). Exception: %(excp)s"),
{'sp': pool['id'], 'excp': e})
last = e
continue
ip_dict[4] = {'address_scope_key': 'address_scope_v4_id',
'subnetpools_key': 'subnetpools_v4'}
if l3p_db['address_scope_v6_id']:
ip_dict[6] = {'address_scope_key': 'address_scope_v6_id',
'subnetpools_key': 'subnetpools_v6'}
subnets = []
for ip_version in ip_dict.keys():
filters = {'tenant_id': [context.current['tenant_id']],
'id': l3p_db[ip_dict[ip_version]['subnetpools_key']]}
# All relevant subnetpools owned by this tenant
candidate_subpools = self._get_subnetpools(
context._plugin_context, filters) or []
del filters['tenant_id']
filters['shared'] = [True]
# All relevant shared subnetpools
shared_subpools = self._get_subnetpools(
context._plugin_context, filters) or []
# Union of the above two lists of subnetpools
candidate_subpools = {x['id']: x for x in candidate_subpools +
shared_subpools}.values()
subnet = None
for pool in candidate_subpools:
try:
attrs = {'tenant_id': context.current['tenant_id'],
'name': 'ptg_' + context.current['name'],
'network_id': l2p['network_id'],
'ip_version': ip_version,
'subnetpool_id': pool['id'],
'cidr': attributes.ATTR_NOT_SPECIFIED,
'prefixlen': attributes.ATTR_NOT_SPECIFIED,
'enable_dhcp': True,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': (
MAPPING_CFG.dns_nameservers or
attributes.ATTR_NOT_SPECIFIED),
'host_routes': attributes.ATTR_NOT_SPECIFIED}
if ip_version == 6:
if 'ipv6_ra_mode' not in subnet_specifics:
subnet_specifics['ipv6_ra_mode'] = (
MAPPING_CFG.default_ipv6_ra_mode or
attributes.ATTR_NOT_SPECIFIED)
if 'ipv6_address_mode' not in subnet_specifics:
subnet_specifics['ipv6_address_mode'] = (
MAPPING_CFG.default_ipv6_address_mode or
attributes.ATTR_NOT_SPECIFIED)
attrs.update(subnet_specifics)
subnet = self._create_subnet(context._plugin_context,
attrs)
self._mark_subnet_owned(context._plugin_context.session,
subnet['id'])
LOG.debug("Allocated subnet %(sub)s from subnetpool: "
"%(sp)s.", {'sub': subnet['id'],
'sp': pool['id']})
subnets.append(subnet)
break
except Exception as e:
LOG.info(_LI("Allocating subnet from subnetpool %(sp)s "
"failed. Allocation will be attempted "
"from any other configured "
"subnetpool(s). Exception: %(excp)s"),
{'sp': pool['id'], 'excp': e})
last = e
continue
if subnet:
return [subnet]
if subnets:
return subnets
else:
# In the case of multiple subnetpools configured, the failure
# condition for subnet allocation on earlier subnetpools might
@ -634,29 +657,55 @@ class ImplicitResourceOperations(local_api.LocalAPI,
last = exc.NoSubnetAvailable()
subnets = subnets or self._get_subnets(context._plugin_context,
{'id': ptg['subnets']})
v4_subnets = [subnet for subnet in subnets
if subnet['ip_version'] == 4]
v6_subnets = [subnet for subnet in subnets
if subnet['ip_version'] == 6]
for subnet in subnets:
try:
attrs = {'tenant_id': context.current['tenant_id'],
'name': 'pt_' + context.current['name'],
'network_id': l2p['network_id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': [{'subnet_id': subnet['id']}],
'device_id': '',
'device_owner': '',
'security_groups': [sg_id] if sg_id else None,
'admin_state_up': True}
if context.current.get('group_default_gateway'):
attrs['fixed_ips'][0]['ip_address'] = subnet['gateway_ip']
attrs.update(context.current.get('port_attributes', {}))
port = self._create_port(context._plugin_context, attrs)
port_id = port['id']
self._mark_port_owned(context._plugin_context.session, port_id)
context.set_port_id(port_id)
return
except n_exc.IpAddressGenerationFailure as ex:
LOG.warning(_LW("No more address available in subnet %s"),
subnet['id'])
last = ex
def subnet_family_generator(family_subnets):
def inner():
for subnet in family_subnets:
yield subnet
return inner
# For dual-stack, try to create with this subnet and
# a subnet from the other address family. Try this for
# each address family
if not (v4_subnets and v6_subnets):
# for single stack, we need the empty list to
# guarantee a single iteration
subnet_generator = subnet_family_generator([''])
elif subnet['ip_version'] == 4:
subnet_generator = subnet_family_generator(v6_subnets)
else:
subnet_generator = subnet_family_generator(v4_subnets)
for alt_subnet in subnet_generator():
fixed_ips = [{'subnet_id': subnet['id']}]
if alt_subnet:
fixed_ips.append({'subnet_id': alt_subnet['id']})
try:
attrs = {'tenant_id': context.current['tenant_id'],
'name': 'pt_' + context.current['name'],
'network_id': l2p['network_id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': '',
'device_owner': '',
'security_groups': [sg_id] if sg_id else None,
'admin_state_up': True}
if context.current.get('group_default_gateway'):
attrs['fixed_ips'][0]['ip_address'] = subnet[
'gateway_ip']
attrs.update(context.current.get('port_attributes', {}))
port = self._create_port(context._plugin_context, attrs)
port_id = port['id']
self._mark_port_owned(context._plugin_context.session,
port_id)
context.set_port_id(port_id)
return
except n_exc.IpAddressGenerationFailure as ex:
LOG.warning(_LW("No more address available in subnet %s"),
subnet['id'])
last = ex
raise last
def _cleanup_port(self, plugin_context, port_id):
@ -1636,12 +1685,17 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
subnets = []
for l3p in l3ps:
if l3p['id'] != curr['id']:
subnets.append(l3p['ip_pool'])
for prefix in gbp_utils.convert_ip_pool_string_to_list(
l3p['ip_pool']):
if prefix:
subnets.append(prefix)
if 'proxy_ip_pool' in l3p:
subnets.append(l3p['proxy_ip_pool'])
l3p_subnets = [curr['ip_pool']]
subnets.extend(gbp_utils.convert_ip_pool_string_to_list(
l3p['proxy_ip_pool']))
l3p_subnets = gbp_utils.convert_ip_pool_string_to_list(curr['ip_pool'])
if 'proxy_ip_pool' in curr:
l3p_subnets.append(curr['proxy_ip_pool'])
l3p_subnets.extend(gbp_utils.convert_ip_pool_string_to_list(
curr['proxy_ip_pool']))
current_set = netaddr.IPSet(subnets)
l3p_set = netaddr.IPSet(l3p_subnets)
@ -2585,8 +2639,12 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
admin_context,
filters={'tenant_id': [tenant_id or context.current['tenant_id']]})
ip_pool_list = [x['ip_pool'] for x in l3ps if
x['ip_pool'] not in exclude]
ip_pool_list = []
for l3p in l3ps:
for prefix in gbp_utils.convert_ip_pool_string_to_list(
l3p['ip_pool']):
if prefix not in exclude:
ip_pool_list.append(prefix)
l3p_set = netaddr.IPSet(ip_pool_list)
return [str(x) for x in (netaddr.IPSet(cidrs) - l3p_set).iter_cidrs()]
@ -2632,12 +2690,13 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
filters={'tenant_id': context.current['tenant_id']})
for ep in ep_list:
# Remove rules before the new ip_pool came
ip_pool_list = gbp_utils.convert_ip_pool_string_to_list(ip_pool)
cidr_list = self._get_ep_cidr_list(context, ep)
old_cidrs = self._process_external_cidrs(context, cidr_list,
exclude=[ip_pool])
exclude=ip_pool_list)
new_cidrs = [str(x) for x in
(netaddr.IPSet(old_cidrs) -
netaddr.IPSet([ip_pool])).iter_cidrs()]
netaddr.IPSet(ip_pool_list)).iter_cidrs()]
self._refresh_ep_cidrs_rules(context, ep, new_cidrs, old_cidrs)
def _process_remove_l3p_ip_pool(self, context, ip_pool):
@ -2647,13 +2706,14 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
filters={'tenant_id': context.current['tenant_id']})
for ep in ep_list:
# Cidrs before the ip_pool removal
ip_pool_list = gbp_utils.convert_ip_pool_string_to_list(ip_pool)
cidr_list = self._get_ep_cidr_list(context, ep)
new_cidrs = self._process_external_cidrs(context, cidr_list,
exclude=[ip_pool])
exclude=ip_pool_list)
# Cidrs after the ip_pool removal
old_cidrs = [str(x) for x in
(netaddr.IPSet(new_cidrs) |
netaddr.IPSet([ip_pool])).iter_cidrs()]
netaddr.IPSet(ip_pool_list)).iter_cidrs()]
self._refresh_ep_cidrs_rules(context, ep, new_cidrs, old_cidrs)
def _set_l3p_external_routes(self, context, added=None, removed=None):

View File

@ -37,6 +37,7 @@ from gbpservice.neutron.services.grouppolicy import (
policy_driver_manager as manager)
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
from gbpservice.neutron.services.grouppolicy.common import utils
from gbpservice.neutron.services.servicechain.plugins.ncp import (
model as ncp_model)
@ -247,7 +248,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
l3ps = self.get_l3_policies(
context, filters={'id': current['l3_policies']})
for l3p in l3ps:
if netaddr.IPSet([l3p['ip_pool']]) & added_ipset:
ip_pool_list = utils.convert_ip_pool_string_to_list(
l3p['ip_pool'])
if netaddr.IPSet(ip_pool_list) & added_ipset:
raise gp_exc.ExternalRouteOverlapsWithL3PIpPool(
destination=added_dest, l3p_id=l3p['id'],
es_id=current['id'])
@ -273,7 +276,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
if added:
es_list = self.get_external_segments(context,
filters={'id': added})
l3p_ipset = netaddr.IPSet([current['ip_pool']])
ip_pool_list = utils.convert_ip_pool_string_to_list(
current['ip_pool'])
l3p_ipset = netaddr.IPSet(ip_pool_list)
for es in es_list:
# Verify no route overlap
dest_set = set(x['destination'] for x in

View File

@ -217,8 +217,12 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
external_ipset = netaddr.IPSet([x['destination']
for x in es['external_routes']])
if l3p_list:
result = external_ipset - netaddr.IPSet([x['ip_pool']
for x in l3p_list])
prefixes = []
for l3p in l3p_list:
if l3p['ip_pool']:
for prefix in l3p['ip_pool'].split(','):
prefixes.append(prefix.strip())
result = external_ipset - netaddr.IPSet(prefixes)
else:
result = external_ipset