SFC integration for AIM driver

Based on the OpenStack networking-sfc project API:

- Introducing an SFC driver that maps the SFC model (port pairs,
  port pair groups, port chains) to the AIM Service Graph model;
- Introducing a FlowClassifier driver that maps the FLOWC model (flow
  classifier) to the AIM Service Graph model;
- Adding registry notifications to the AIM mechanism driver and the
  FLOWC driver for business logic validation.

Current divergence/limitations from the upstream SFC API:

- Added two l7_parameters to the flow classifier API,
  logical_source_network and logical_destination_network, representing
  the networks involved in the traffic redirection mechanism (see the
  sketch after this list);
- Every valid flow classifier must include the l7_parameters mentioned
  above. Internal networks and SVI networks are valid values, but
  external networks (created with --external) are excluded.
  When an SVI network is specified, the corresponding source/destination
  IP prefix must also be specified in the API;
- Any FlowClassifier parameter other than the ones mentioned above is
  ignored;
- On port binding, the affected port chain is automatically remapped;
- Trunk ports are supported on port-pairs;
- On PPGs, all the Port Pairs must be in the same network pair;
- Ports in Port Pairs must have an unequivocally retrievable APIC Domain;
- Ports in Port Pairs can't be in the same network;
- Flowc src/dst networks must be distinct;
- Flowc can't be updated if in use by a chain;
- Networks participating in a port chain must be in the same VRF;
- Src and Dst networks in a chain must be in the same tenant
  (temporarily);
- Port Pair's ports' network can't be external or SVI;
- Port Pair's ports' networks can't be re-used in the same PPG.
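
A minimal sketch of a flow classifier body that satisfies the rules
above (all values and UUIDs are placeholders; the two l7_parameters are
the ones introduced by this change, the rest is the standard
networking-sfc API):

    flow_classifier = {
        'flow_classifier': {
            'name': 'redirect-web',
            'ethertype': 'IPv4',
            'protocol': None,
            # Required by the AIM driver (mandatory when SVI networks
            # are used as source/destination):
            'source_ip_prefix': '192.168.0.0/24',
            'destination_ip_prefix': '10.0.0.0/24',
            # Other standard parameters are currently unenforced:
            'logical_source_port': None,
            'logical_destination_port': None,
            'l7_parameters': {
                # Internal or SVI network UUIDs; external networks
                # are rejected.
                'logical_source_network': SRC_NET_UUID,
                'logical_destination_network': DST_NET_UUID,
            },
        }
    }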

Change-Id: If40595584ef46f1ac2aa0cf7525e16447f491f48
Author: Ivar Lazzaro 2017-11-14 14:24:29 -08:00
parent ef3ad5e026
commit 711b4eac75
16 changed files with 2575 additions and 54 deletions


@@ -61,6 +61,8 @@ from gbpservice.neutron.tests.unit.plugins.ml2plus.drivers import ( # noqa
from gbpservice.neutron.tests.unit.services.grouppolicy.drivers import ( # noqa
extension_test
)
from networking_sfc.db import flowclassifier_db # noqa
from networking_sfc.db import sfc_db # noqa
# Note that the models in gbpservice.nfp.orchestrator.db.nfp_db_model
# are managed by a separate fixture, so are not imported here.


@@ -26,7 +26,6 @@ from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_log import log
from oslo_utils import uuidutils
from sqlalchemy import event
from sqlalchemy.orm import exc
@@ -433,3 +432,127 @@ def _get_tenant_id_for_create(self, context, resource):
common_db_mixin.CommonDbMixin._get_tenant_id_for_create = (
_get_tenant_id_for_create)
# TODO(ivar): while this block would be better placed in the patch_neutron
# module, it seems like being part of an "extension" package is the only
# way to make it work at the moment. Tests have shown that Neutron reloads
# the extensions at every call (at least in the UTs) and this causes the
# AIM_FLC_L7_PARAMS to be reset over and over. By patching at this point,
# we make sure we always have the proper value for that variable.
try:
import six
import sys
from networking_sfc.db import flowclassifier_db
from networking_sfc.db import sfc_db
from networking_sfc.extensions import flowclassifier as fc_ext
from neutron.services.trunk import constants as tcst
from oslo_utils import uuidutils
from gbpservice.neutron.services.sfc.aim import constants as sfc_cts
if 'flowclassifier' in sys.modules:
sys.modules['flowclassifier'].SUPPORTED_L7_PARAMETERS.update(
sfc_cts.AIM_FLC_L7_PARAMS)
if 'networking_sfc.extensions.flowclassifier' in sys.modules:
sys.modules[
('networking_sfc.extensions.'
'flowclassifier')].SUPPORTED_L7_PARAMETERS.update(
sfc_cts.AIM_FLC_L7_PARAMS)
# REVISIT(ivar): The following diff will fix flow classifier creation
# method when using L7 parameters.
# - key: L7Parameter(key, val)
# + key: L7Parameter(keyword=key, value=val)
# Also, make sure classifiers with different l7 params are not considered
# conflicting
def create_flow_classifier(self, context, flow_classifier):
fc = flow_classifier['flow_classifier']
tenant_id = fc['tenant_id']
l7_parameters = {
key: flowclassifier_db.L7Parameter(keyword=key, value=val)
for key, val in six.iteritems(fc['l7_parameters'])}
ethertype = fc['ethertype']
protocol = fc['protocol']
source_port_range_min = fc['source_port_range_min']
source_port_range_max = fc['source_port_range_max']
self._check_port_range_valid(source_port_range_min,
source_port_range_max,
protocol)
destination_port_range_min = fc['destination_port_range_min']
destination_port_range_max = fc['destination_port_range_max']
self._check_port_range_valid(destination_port_range_min,
destination_port_range_max,
protocol)
source_ip_prefix = fc['source_ip_prefix']
self._check_ip_prefix_valid(source_ip_prefix, ethertype)
destination_ip_prefix = fc['destination_ip_prefix']
self._check_ip_prefix_valid(destination_ip_prefix, ethertype)
logical_source_port = fc['logical_source_port']
logical_destination_port = fc['logical_destination_port']
with context.session.begin(subtransactions=True):
if logical_source_port is not None:
self._get_port(context, logical_source_port)
if logical_destination_port is not None:
self._get_port(context, logical_destination_port)
query = self._model_query(
context, flowclassifier_db.FlowClassifier)
for flow_classifier_db in query.all():
if self.flowclassifier_conflict(
fc,
flow_classifier_db
):
# REVISIT(ivar): Conflict considers l7_parameters
if (validators.is_attr_set(fc['l7_parameters']) and
validators.is_attr_set(
flow_classifier_db['l7_parameters'])):
if (fc['l7_parameters'] ==
flow_classifier_db['l7_parameters']):
raise fc_ext.FlowClassifierInConflict(
id=flow_classifier_db['id']
)
flow_classifier_db = flowclassifier_db.FlowClassifier(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=fc['name'],
description=fc['description'],
ethertype=ethertype,
protocol=protocol,
source_port_range_min=source_port_range_min,
source_port_range_max=source_port_range_max,
destination_port_range_min=destination_port_range_min,
destination_port_range_max=destination_port_range_max,
source_ip_prefix=source_ip_prefix,
destination_ip_prefix=destination_ip_prefix,
logical_source_port=logical_source_port,
logical_destination_port=logical_destination_port,
l7_parameters=l7_parameters
)
context.session.add(flow_classifier_db)
return self._make_flow_classifier_dict(flow_classifier_db)
flowclassifier_db.FlowClassifierDbPlugin.create_flow_classifier = (
create_flow_classifier)
# NOTE(ivar): Trunk subports don't have a device ID, we need this
# validation to pass
def _validate_port_pair_ingress_egress(self, ingress, egress):
if any(port.get('device_owner') == tcst.TRUNK_SUBPORT_OWNER
for port in [ingress, egress]):
return
if 'device_id' not in ingress or not ingress['device_id']:
raise sfc_db.ext_sfc.PortPairIngressNoHost(
ingress=ingress['id']
)
if 'device_id' not in egress or not egress['device_id']:
raise sfc_db.ext_sfc.PortPairEgressNoHost(
egress=egress['id']
)
if ingress['device_id'] != egress['device_id']:
raise sfc_db.ext_sfc.PortPairIngressEgressDifferentHost(
ingress=ingress['id'],
egress=egress['id'])
sfc_db.SfcDbPlugin._validate_port_pair_ingress_egress = (
_validate_port_pair_ingress_egress)
except ImportError as e:
LOG.warning("Import error while patching networking-sfc: %s",
e.message)


@@ -28,6 +28,11 @@ L3_POLICY_TYPE_TAG = 'l3p'
POLICY_RULE_SET_TYPE_TAG = 'prs'
POLICY_RULE_TYPE_TAG = 'pr'
APPLICATION_POLICY_GROUP_TYPE_TAG = 'apg'
PORT_PAIR_GROUP_TYPE_TAG = 'ppg'
PORT_PAIR_TYPE_TAG = 'pp'
PORT_TYPE_TAG = 'prt'
FLOW_CLASSIFIER_TYPE_TAG = 'flc'
PORT_CHAIN_TYPE_TAG = 'ptc'
class APICNameMapper(object):
@@ -99,6 +104,38 @@ class APICNameMapper(object):
return self._unmap(
session, name, APPLICATION_POLICY_GROUP_TYPE_TAG, prefix, enforce)
def port_pair_group(self, session, id, prefix=""):
return self._map(session, id, PORT_PAIR_GROUP_TYPE_TAG, prefix)
def reverse_port_pair_group(self, session, name, prefix="", enforce=True):
return self._unmap(session, name, PORT_PAIR_GROUP_TYPE_TAG, prefix,
enforce)
def port_pair(self, session, id, prefix=""):
return self._map(session, id, PORT_PAIR_TYPE_TAG, prefix)
def reverse_port_pair(self, session, name, prefix="", enforce=True):
return self._unmap(session, name, PORT_PAIR_TYPE_TAG, prefix, enforce)
def port(self, session, id, prefix=""):
return self._map(session, id, PORT_TYPE_TAG, prefix)
def reverse_port(self, session, name, prefix="", enforce=True):
return self._unmap(session, name, PORT_TYPE_TAG, prefix, enforce)
def flow_classifier(self, session, id, prefix=""):
return self._map(session, id, FLOW_CLASSIFIER_TYPE_TAG, prefix)
def reverse_flow_classifier(self, session, name, prefix="", enforce=True):
return self._unmap(session, name, FLOW_CLASSIFIER_TYPE_TAG, prefix,
enforce)
def port_chain(self, session, id, prefix=""):
return self._map(session, id, PORT_CHAIN_TYPE_TAG, prefix)
def reverse_port_chain(self, session, name, prefix="", enforce=True):
return self._unmap(session, name, PORT_CHAIN_TYPE_TAG, prefix, enforce)
def _map(self, session, id, type_tag, prefix):
return ("%(prefix)s%(type_tag)s_%(id)s" %
{'prefix': prefix, 'type_tag': type_tag, 'id': id})
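
For illustration, _map is a pure string concatenation (no DB lookup),
so with a hypothetical UUID the new mappers return names like:

    mapper = APICNameMapper()
    mapper.port_pair_group(session, 'a1b2')   # -> 'ppg_a1b2'
    mapper.port_pair_group(session, 'a1b2',
                           prefix='ingr_')    # -> 'ingr_ppg_a1b2'
    mapper.flow_classifier(session, 'c3d4')   # -> 'flc_c3d4'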


@@ -26,6 +26,8 @@ from aim.common import utils
from aim import context as aim_context
from aim import utils as aim_utils
from neutron.agent import securitygroups_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.common import rpc as n_rpc
from neutron.common import topics as n_topics
from neutron import context as nctx
@@ -42,7 +44,9 @@ from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron import manager
from neutron.plugins.common import constants as pconst
from neutron.plugins.ml2 import db as n_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context as ml2_context
from neutron.plugins.ml2 import models
from neutron_lib import constants as n_constants
from neutron_lib import exceptions as n_exceptions
@@ -70,6 +74,7 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import exceptions
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import extension_db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import trunk_driver
from gbpservice.neutron.services.sfc.aim import constants as sfc_cts
LOG = log.getLogger(__name__)
DEVICE_OWNER_SNAT_PORT = 'apic:snat-pool'
@@ -521,7 +526,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
epg.bd_name = bd.name
self.aim.create(aim_ctx, epg)
self._add_network_mapping(session, current['id'], bd, epg, vrf)
self._add_network_mapping_and_notify(
context._plugin_context, current['id'], bd, epg, vrf)
def update_network_precommit(self, context):
current = context.current
@@ -1260,7 +1266,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# router topology.
vrf = self._ensure_default_vrf(aim_ctx, intf_vrf)
self._move_topology(
aim_ctx, router_topology, router_vrf, vrf,
context, aim_ctx, router_topology, router_vrf, vrf,
nets_to_notify)
router_topo_moved = True
self._cleanup_default_vrf(aim_ctx, router_vrf)
@@ -1271,7 +1277,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
vrf = router_vrf
if net_intfs:
self._move_topology(
aim_ctx, intf_topology, intf_vrf, vrf,
context, aim_ctx, intf_topology, intf_vrf, vrf,
nets_to_notify)
self._cleanup_default_vrf(aim_ctx, intf_vrf)
else:
@@ -1292,10 +1298,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# First interface for network.
if network_db.aim_mapping.epg_name:
bd, epg = self._associate_network_with_vrf(
aim_ctx, network_db, vrf, nets_to_notify)
context, aim_ctx, network_db, vrf, nets_to_notify)
elif network_db.aim_mapping.l3out_name:
l3out, epg = self._associate_network_with_vrf(
aim_ctx, network_db, vrf, nets_to_notify)
context, aim_ctx, network_db, vrf, nets_to_notify)
else:
# Network is already routed.
#
@@ -1458,7 +1464,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
if old_vrf.identity != intf_vrf.identity:
intf_vrf = self._ensure_default_vrf(aim_ctx, intf_vrf)
self._move_topology(
aim_ctx, intf_topology, old_vrf, intf_vrf,
context, aim_ctx, intf_topology, old_vrf, intf_vrf,
nets_to_notify)
# See if the router's topology must be moved.
@@ -1471,7 +1477,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
if old_vrf.identity != router_vrf.identity:
router_vrf = self._ensure_default_vrf(aim_ctx, router_vrf)
self._move_topology(
aim_ctx, router_topology, old_vrf, router_vrf,
context, aim_ctx, router_topology, old_vrf, router_vrf,
nets_to_notify)
router_topo_moved = True
@@ -1479,7 +1485,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# network's BD unrouted.
if not router_ids:
self._dissassociate_network_from_vrf(
aim_ctx, network_db, old_vrf, nets_to_notify)
context, aim_ctx, network_db, old_vrf, nets_to_notify)
if scope_id == NO_ADDR_SCOPE:
self._cleanup_default_vrf(aim_ctx, old_vrf)
@@ -1538,7 +1544,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
vnic_type)
return
if port['binding:host_id'].startswith(FABRIC_HOST_ID):
if port[portbindings.HOST_ID].startswith(FABRIC_HOST_ID):
for segment in context.segments_to_bind:
context.set_binding(segment[api.ID],
VIF_TYPE_FABRIC,
@@ -1644,6 +1650,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
context.bottom_bound_segment[api.NETWORK_TYPE])):
self._associate_domain(context, is_vmm=True)
self._update_sg_rule_with_remote_group_set(context, port)
registry.notify(sfc_cts.GBP_PORT, events.PRECOMMIT_UPDATE,
self, driver_context=context)
def update_port_postcommit(self, context):
port = context.current
@@ -2161,7 +2169,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
distinct())
return rtr_dbs
def _associate_network_with_vrf(self, aim_ctx, network_db, new_vrf,
def _associate_network_with_vrf(self, ctx, aim_ctx, network_db, new_vrf,
nets_to_notify):
LOG.debug("Associating previously unrouted network %(net_id)s named "
"'%(net_name)s' in project %(net_tenant)s with VRF %(vrf)s",
@@ -2193,7 +2201,6 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
bd.vrf_name = new_vrf.name
bd = self.aim.create(aim_ctx, bd)
self._set_network_bd(network_db.aim_mapping, bd)
epg = self.aim.get(aim_ctx, epg)
self.aim.delete(aim_ctx, epg)
# ensure app profile exists in destination tenant
@@ -2203,7 +2210,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.aim.create(aim_ctx, ap)
epg.tenant_name = new_vrf.tenant_name
epg = self.aim.create(aim_ctx, epg)
self._set_network_epg(network_db.aim_mapping, epg)
self._set_network_epg_and_notify(ctx, network_db.aim_mapping,
epg)
else:
old_l3out = self.aim.get(aim_ctx, l3out)
l3out = copy.copy(old_l3out)
@@ -2227,7 +2235,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
l3out = self.aim.update(aim_ctx, l3out,
vrf_name=new_vrf.name)
self._set_network_vrf(network_db.aim_mapping, new_vrf)
self._set_network_vrf_and_notify(ctx, network_db.aim_mapping, new_vrf)
# All non-router ports on this network need to be notified
# since their BD's VRF and possibly their BD's and EPG's
@@ -2240,8 +2248,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
ext_net = self._get_network_l3out_ext_net(network_db.aim_mapping)
return l3out, ext_net
def _dissassociate_network_from_vrf(self, aim_ctx, network_db, old_vrf,
nets_to_notify):
def _dissassociate_network_from_vrf(self, ctx, aim_ctx, network_db,
old_vrf, nets_to_notify):
LOG.debug("Dissassociating network %(net_id)s named '%(net_name)s' in "
"project %(net_tenant)s from VRF %(vrf)s",
{'net_id': network_db.id, 'net_name': network_db.name,
@@ -2270,13 +2278,13 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
bd.vrf_name = new_vrf.name
bd = self.aim.create(aim_ctx, bd)
self._set_network_bd(network_db.aim_mapping, bd)
epg = self._get_network_epg(network_db.aim_mapping)
epg = self.aim.get(aim_ctx, epg)
self.aim.delete(aim_ctx, epg)
epg.tenant_name = new_tenant_name
epg = self.aim.create(aim_ctx, epg)
self._set_network_epg(network_db.aim_mapping, epg)
self._set_network_epg_and_notify(ctx, network_db.aim_mapping,
epg)
else:
l3out = self._get_network_l3out(network_db.aim_mapping)
old_l3out = self.aim.get(aim_ctx, l3out)
@@ -2303,14 +2311,14 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
l3out = self.aim.update(aim_ctx, l3out,
vrf_name=new_vrf.name)
self._set_network_vrf(network_db.aim_mapping, new_vrf)
self._set_network_vrf_and_notify(ctx, network_db.aim_mapping, new_vrf)
# All non-router ports on this network need to be notified
# since their BD's VRF and possibly their BD's and EPG's
# Tenants have changed.
nets_to_notify.add(network_db.id)
def _move_topology(self, aim_ctx, topology, old_vrf, new_vrf,
def _move_topology(self, ctx, aim_ctx, topology, old_vrf, new_vrf,
nets_to_notify):
LOG.info(_LI("Moving routed networks %(topology)s from VRF "
"%(old_vrf)s to VRF %(new_vrf)s"),
@@ -2352,8 +2360,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.aim.delete(aim_ctx, epg)
epg.tenant_name = new_vrf.tenant_name
epg = self.aim.create(aim_ctx, epg)
self._set_network_epg(network_db.aim_mapping, epg)
# SVI network with auto l3out.
self._set_network_epg_and_notify(ctx,
network_db.aim_mapping,
epg)
# SVI network with auto l3out
elif network_db.aim_mapping.l3out_name:
l3out = self._get_network_l3out(network_db.aim_mapping)
old_l3out = self.aim.get(aim_ctx, l3out)
@@ -2380,7 +2390,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
l3out = self.aim.update(aim_ctx, l3out,
vrf_name=new_vrf.name)
self._set_network_vrf(network_db.aim_mapping, new_vrf)
self._set_network_vrf_and_notify(ctx, network_db.aim_mapping,
new_vrf)
# All non-router ports on all networks in topology need to be
# notified since their BDs' VRFs and possibly their BDs' and
@@ -2634,10 +2645,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def get_aim_domains(self, aim_ctx):
vmms = [{'name': x.name, 'type': x.type}
for x in self.aim.find(aim_ctx, aim_resource.VMMDomain)
if x.type == utils.OPENSTACK_VMM_TYPE]
for x in self.aim.find(aim_ctx, aim_resource.VMMDomain)
if x.type == utils.OPENSTACK_VMM_TYPE]
phys = [{'name': x.name}
for x in self.aim.find(aim_ctx, aim_resource.PhysicalDomain)]
for x in self.aim.find(aim_ctx, aim_resource.PhysicalDomain)]
return vmms, phys
def _is_external(self, network):
@@ -3506,3 +3517,165 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
n_constants.DEVICE_OWNER_ROUTER_INTF).
all())
return [p[0] for p in port_ids]
def _get_port_network_id(self, plugin_context, port_id):
port = self.plugin.get_port(plugin_context, port_id)
return port['network_id']
def _get_svi_default_external_epg(self, network):
if not network.get(cisco_apic.SVI):
return None
ext_net_dn = network.get(cisco_apic.DIST_NAMES, {}).get(
cisco_apic.EXTERNAL_NETWORK)
return aim_resource.ExternalNetwork.from_dn(ext_net_dn)
def _get_svi_net_l3out(self, network):
aim_ext_net = self._get_svi_default_external_epg(network)
if not aim_ext_net:
return None
return aim_resource.L3Outside(
tenant_name=aim_ext_net.tenant_name, name=aim_ext_net.l3out_name)
def _get_bd_by_network_id(self, session, network_id):
net_mapping = self._get_network_mapping(session, network_id)
return self._get_network_bd(net_mapping)
def _get_epg_by_network_id(self, session, network_id):
net_mapping = self._get_network_mapping(session, network_id)
return self._get_network_epg(net_mapping)
def _get_vrf_by_network(self, session, network):
vrf_dn = network.get(cisco_apic.DIST_NAMES, {}).get(cisco_apic.VRF)
if vrf_dn:
return aim_resource.VRF.from_dn(vrf_dn)
# Pre-existing EXT NET.
l3out = self._get_svi_net_l3out(network)
if l3out:
aim_ctx = aim_context.AimContext(db_session=session)
l3out = self.aim.get(aim_ctx, l3out)
# TODO(ivar): VRF could be in tenant common, there's no way of
# knowing it until we put the VRF in the mapping.
return aim_resource.VRF(tenant_name=l3out.tenant_name,
name=l3out.vrf_name)
net_mapping = self._get_network_mapping(session, network['id'])
return self._get_network_vrf(net_mapping)
def _get_port_static_path_and_encap(self, plugin_context, port):
port_id = port['id']
path = encap = None
if self._is_port_bound(port):
session = plugin_context.session
aim_ctx = aim_context.AimContext(db_session=session)
__, binding = n_db.get_locked_port_and_binding(session,
port_id)
levels = n_db.get_binding_levels(session, port_id,
binding.host)
network = self.plugin.get_network(
plugin_context, port['network_id'])
port_context = ml2_context.PortContext(
self, plugin_context, port, network, binding, levels)
host = port_context.host
segment = port_context.bottom_bound_segment
host_link_net_labels = self.aim.find(
aim_ctx, aim_infra.HostLinkNetworkLabel, host_name=host,
network_label=segment[api.PHYSICAL_NETWORK])
if host_link_net_labels:
for hl_net_label in host_link_net_labels:
interface = hl_net_label.interface_name
host_link = self.aim.find(
aim_ctx, aim_infra.HostLink, host_name=host,
interface_name=interface)
if not host_link or not host_link[0].path:
LOG.warning(
'No host link information found for host: '
'%(host)s, interface: %(interface)s',
{'host': host, 'interface': interface})
continue
path = host_link[0].path
if not path:
host_link = self.aim.find(aim_ctx, aim_infra.HostLink,
host_name=host)
if not host_link or not host_link[0].path:
LOG.warning(
'No host link information found for host %s', host)
return None, None
path = host_link[0].path
if segment:
if segment.get(api.NETWORK_TYPE) in [pconst.TYPE_VLAN]:
encap = 'vlan-%s' % segment[api.SEGMENTATION_ID]
else:
LOG.debug('Unsupported segmentation type for static path '
'binding: %s',
segment.get(api.NETWORK_TYPE))
encap = None
return path, encap
def _get_port_unique_domain(self, plugin_context, port):
"""Get port domain
Returns a unique domain (either virtual or physical) in which the
specific endpoint is placed. If the domain cannot be uniquely
identified returns None
:param plugin_context:
:param port:
:return:
"""
# TODO(ivar): at the moment, it's likely that this method won't
# return anything unique for the specific port. This is because we
# don't require users to specify domain mappings, and even if we did,
# such mappings are barely scoped by host, and each host could have
# at the very least one VMM and one Physical domain referring to it
# (HPB). However, every Neutron port can actually belong only to a
# single domain. We should implement a way to unequivocally retrieve
# that information.
session = plugin_context.session
aim_ctx = aim_context.AimContext(session)
if self._is_port_bound(port):
host_id = port[portbindings.HOST_ID]
dom_mappings = (self.aim.find(aim_ctx,
aim_infra.HostDomainMappingV2,
host_name=host_id) or
self.aim.find(aim_ctx,
aim_infra.HostDomainMappingV2,
host_name=DEFAULT_HOST_DOMAIN))
if not dom_mappings:
# If there's no direct mapping, get all the existing domains in
# AIM.
vmms, phys = self.get_aim_domains(aim_ctx)
for vmm in vmms:
dom_mappings.append(
aim_infra.HostDomainMappingV2(
domain_type=vmm['type'], domain_name=vmm['name'],
host_name=DEFAULT_HOST_DOMAIN))
for phy in phys:
dom_mappings.append(
aim_infra.HostDomainMappingV2(
domain_type='PhysDom', domain_name=phy['name'],
host_name=DEFAULT_HOST_DOMAIN))
if not dom_mappings or len(dom_mappings) > 1:
return None, None
return dom_mappings[0].domain_type, dom_mappings[0].domain_name
return None, None
def _add_network_mapping_and_notify(self, context, network_id, bd, epg,
vrf):
with context.session.begin(subtransactions=True):
self._add_network_mapping(context.session, network_id, bd, epg,
vrf)
registry.notify(sfc_cts.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context, network_id=network_id)
def _set_network_epg_and_notify(self, context, mapping, epg):
with context.session.begin(subtransactions=True):
self._set_network_epg(mapping, epg)
registry.notify(sfc_cts.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)
def _set_network_vrf_and_notify(self, context, mapping, vrf):
with context.session.begin(subtransactions=True):
self._set_network_vrf(mapping, vrf)
registry.notify(sfc_cts.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)


@@ -0,0 +1,34 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(ivar): right now, the flowclassifier plugin doesn't support
# notifications. Adding our own using the proper resource name
# could be dangerous for compatibility once they suddenly start supporting
# them. We create our own resource type and make sure to modify it once
# support is added to the SFC project.
GBP_FLOW_CLASSIFIER = 'gbp_flowclassifier'
GBP_PORT = 'gbp_port'
LOGICAL_SRC_NET = 'logical_source_network'
LOGICAL_DST_NET = 'logical_destination_network'
AIM_FLC_L7_PARAMS = {
LOGICAL_SRC_NET: {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'validate': {'type:uuid_or_none': None}},
LOGICAL_DST_NET: {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'validate': {'type:uuid_or_none': None}}
}
AIM_FLC_PARAMS = ['source_ip_prefix', 'destination_ip_prefix']
GBP_NETWORK_VRF = 'gbp_network_vrf'
GBP_NETWORK_EPG = 'gbp_network_epg'
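
These resource tags are consumed through the regular Neutron callbacks
registry. As a rough sketch of the pattern (the handler name below is
illustrative; the real producers and consumers are the drivers later in
this change):

    from neutron.callbacks import events
    from neutron.callbacks import registry

    from gbpservice.neutron.services.sfc.aim import constants as sfc_cts

    def _handle_flow_classifier(rtype, event, trigger,
                                driver_context=None, **kwargs):
        # React to flow classifier changes before they are committed.
        pass

    registry.subscribe(_handle_flow_classifier,
                       sfc_cts.GBP_FLOW_CLASSIFIER,
                       events.PRECOMMIT_CREATE)

    # Producer side, fired from the FlowClassifier driver's precommit:
    # registry.notify(sfc_cts.GBP_FLOW_CLASSIFIER,
    #                 events.PRECOMMIT_CREATE, self,
    #                 driver_context=context)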


@@ -0,0 +1,114 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions
class UnsupportedConfiguration(exceptions.BadRequest):
message = _("Configuration %(conf)s for object of type %(type)s is not "
"supported.")
class PortPairsDifferentNetworkInGroup(exceptions.BadRequest):
message = _("For a Port Pair Group, ingress and egress networks should "
"be the same across Port Pairs. Example of valid "
"port pairs networks: [(N1, N2), (N1, N2), (N1, N2)]. "
"Invalid Example: [(N1, N2), (N1, N3), (N3, N2)]. "
"Port Pair Group ID: %(id)s")
class PortPairsUnsupportedDomain(exceptions.BadRequest):
message = _("Port Pair's ingress and egress port domains are unsupported "
"Please check the AIMCTL HostDomainMappingV2 "
"port pair ID: %(id)s supported domains: %(doms)s")
class PortPairsDifferentDomain(exceptions.BadRequest):
message = _("Port Pair's ingress and egress port can't be in different "
"domains. Please check the AIMCTL HostDomainMappingV2 "
"port pair ID: %(id)s")
class PortPairsNoUniqueDomain(exceptions.BadRequest):
message = _("Port Pair's ingress and egress port domains can't be "
"unequivocally identified. Please check the AIMCTL "
"HostDomainMappingV2 port pair ID: %(id)s")
class PortPairsSameNetwork(exceptions.BadRequest):
message = _("Port Pair's ingress and egress port can't be in the same "
"network. port pair ID: %(id)s")
class PortPairsInPortPairGroupDifferentDomain(exceptions.BadRequest):
message = _("Port Pairs in the same PPG must be in the same domain, "
"Please check the AIMCTL HostDomainMappingV2 "
"port pair ID: %(id)s")
class BadFlowClassifier(exceptions.BadRequest):
message = _("The following L7 parameters must be configured on Flow "
"Classifiers when using AIM driver: %(params)s")
class FlowClassifierSameSrcDstNetworks(exceptions.BadRequest):
message = _("Source and Destination networks must be different in Flow "
"Classifier.")
class RouterIDInUseByFlowClassifier(exceptions.BadRequest):
message = _("Router %(router_id)s is in use by Flow Classifier "
"%(flowc_id)s.")
class FlowClassifierInUseByAChain(exceptions.BadRequest):
message = _("Cannot update fields in flow classifier while in use by a "
"port chain. fields: %(fields)s port chain: %(pc_id)s")
class NoL3OutAssociatedToFlowcExternalNetwork(exceptions.BadRequest):
message = _("Cannot map flow classifier %(id)s, either its source or "
"destination network is external but has no L3Outside "
"associated to it.")
class NoPhysicalDomainSpecifiedInServiceEPG(exceptions.BadRequest):
message = _("No Physical Domain is specified in service EPG %(epg_id)s. ")
class MultipleVRFsDetectedInPortChain(exceptions.BadRequest):
message = _("Port Chain %(id)s spans across multiple VRFs. All providers, "
"consumers, and service BDs have to be in the same VRF.")
class FlowClassifierSrcDstNetworksDifferentTenants(exceptions.BadRequest):
message = _("Source and Destination networks for flow classifier %(id)s "
"are in different tenants. This is currently unsupported.")
class NetworkInUseByFlowClassifiers(exceptions.BadRequest):
message = _("Cannot delete network in use by classifiers %(ids)s")
class ServiceNetworkBadType(exceptions.BadRequest):
message = _("Service networks can't be SVI or External. "
"Port Pair ID: %(id)s")
class ConflictingNetworksDetectedInPortChain(exceptions.BadRequest):
message = _("Port Pair Groups in Port Chain cannot share any network. "
"%(id)s")
class DefaultExternalNetworkNotFound(exceptions.NotFound):
message = _("Default External Network not found for SVI network "
"%(id)s.")


@@ -0,0 +1,124 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_sfc.db import flowclassifier_db as flc_db
from networking_sfc.extensions import flowclassifier
from networking_sfc.services.flowclassifier.drivers import base
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron import manager
from neutron_lib.api import validators
from oslo_log import log as logging
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.sfc.aim import constants as sfc_cts
from gbpservice.neutron.services.sfc.aim import exceptions as sfc_exc
LOG = logging.getLogger(__name__)
flowclassifier.SUPPORTED_L7_PARAMETERS.update(sfc_cts.AIM_FLC_L7_PARAMS)
class FlowclassifierAIMDriverBase(base.FlowClassifierDriverBase):
def create_flow_classifier_precommit(self, context):
pass
def create_flow_classifier(self, context):
pass
def update_flow_classifier(self, context):
pass
def delete_flow_classifier(self, context):
pass
class FlowclassifierAIMDriver(FlowclassifierAIMDriverBase):
"""SFC Driver mapping for AIM."""
def initialize(self):
registry.subscribe(self._handle_network_delete, resources.NETWORK,
events.PRECOMMIT_DELETE)
self._core_plugin = None
@property
def plugin(self):
if not self._core_plugin:
self._core_plugin = manager.NeutronManager.get_plugin()
if not self._core_plugin:
LOG.error(_("No Core plugin found."))
raise exc.GroupPolicyDeploymentError()
return self._core_plugin
def create_flow_classifier_precommit(self, context):
self._validate_flow_classifier(context)
registry.notify(sfc_cts.GBP_FLOW_CLASSIFIER, events.PRECOMMIT_CREATE,
self, driver_context=context)
def update_flow_classifier_precommit(self, context):
self._validate_flow_classifier(context)
registry.notify(sfc_cts.GBP_FLOW_CLASSIFIER, events.PRECOMMIT_UPDATE,
self, driver_context=context)
def delete_flow_classifier_precommit(self, context):
registry.notify(sfc_cts.GBP_FLOW_CLASSIFIER, events.PRECOMMIT_DELETE,
self, driver_context=context)
def _validate_flow_classifier(self, context):
fc = context.current
# Verify L7 params are set
l7_p = fc['l7_parameters']
if any(x for x in sfc_cts.AIM_FLC_L7_PARAMS.keys()
if not validators.is_attr_set(l7_p.get(x))):
raise sfc_exc.BadFlowClassifier(
params=sfc_cts.AIM_FLC_L7_PARAMS.keys())
if l7_p[sfc_cts.LOGICAL_SRC_NET] == l7_p[sfc_cts.LOGICAL_DST_NET]:
raise sfc_exc.FlowClassifierSameSrcDstNetworks()
# Verify networks exist
self.plugin.get_network(context._plugin_context,
l7_p[sfc_cts.LOGICAL_SRC_NET])
self.plugin.get_network(context._plugin_context,
l7_p[sfc_cts.LOGICAL_DST_NET])
# Verify standard params are set
# TODO(ivar): src and dst prefix are needed only for SVI networks
if any(x for x in sfc_cts.AIM_FLC_PARAMS
if not validators.is_attr_set(fc.get(x))):
raise sfc_exc.BadFlowClassifier(params=sfc_cts.AIM_FLC_PARAMS)
# TODO(ivar): Any other parameter is unsupported, for now just
# unenforced.
# TODO(ivar): if source and destination ports are to a private network
# source/destination CIDRs are not required
# TODO(ivar): only one classifier can be provider (destination) if
# the network is not SVI.
def _get_classifiers_by_network_id(self, plugin_context, network_id):
context = plugin_context
with context.session.begin(subtransactions=True):
classifier_ids = []
for keyword in [sfc_cts.LOGICAL_SRC_NET, sfc_cts.LOGICAL_DST_NET]:
classifier_ids.extend(
[x.classifier_id for x in context.session.query(
flc_db.L7Parameter).filter_by(
keyword=keyword).filter_by(value=network_id).all()])
return classifier_ids
def _handle_network_delete(self, rtype, event, trigger, context,
network_id, **kwargs):
flc_ids = self._get_classifiers_by_network_id(context, network_id)
if flc_ids:
# TODO(ivar): instead of raising, we could try deleting the flow
# classifier, which would fail (and rollback the transaction) if
# in use.
raise sfc_exc.NetworkInUseByFlowClassifiers(ids=flc_ids)


@@ -0,0 +1,886 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from aim import aim_manager
from aim.api import resource as aim_resource
from aim.api import service_graph as aim_sg
from aim import context as aim_context
from aim import utils as aim_utils
from networking_sfc.db import sfc_db
from networking_sfc.extensions import flowclassifier as flowc_ext
from networking_sfc.extensions import sfc as sfc_ext
from networking_sfc.services.sfc.common import context as sfc_ctx
from networking_sfc.services.sfc.drivers import base
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron import manager
from neutron_lib import constants as n_constants
from oslo_log import log as logging
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.sfc.aim import constants as sfc_cts
from gbpservice.neutron.services.sfc.aim import exceptions
PBR_INGR_PREFIX = 'ingr_'
PBR_EGR_PREFIX = 'egr_'
INGRESS = 'ingress'
EGRESS = 'egress'
FLOWC_SRC = 'src'
FLOWC_DST = 'dst'
LOG = logging.getLogger(__name__)
PHYSDOM_TYPE = 'PhysDom'
SUPPORTED_DOM_TYPES = [PHYSDOM_TYPE]
class SfcAIMDriverBase(base.SfcDriverBase):
def delete_port_pair_group(self, context):
pass
def create_port_chain(self, context):
pass
def create_port_pair(self, context):
pass
def create_port_pair_group(self, context):
pass
def delete_port_pair(self, context):
pass
def delete_port_chain(self, context):
pass
def update_port_pair_group(self, context):
pass
def update_port_chain(self, context):
pass
def update_port_pair(self, context):
pass
class SfcAIMDriver(SfcAIMDriverBase):
"""SFC Driver mapping for AIM."""
def initialize(self):
# TODO(ivar): SFC resource mapping to APIC DNs
self._core_plugin = None
self._flowc_plugin = None
self._l3_plugin = None
self._sfc_plugin = None
self._aim_mech_driver = None
self._aim_flowc_driver = None
self.name_mapper = apic_mapper.APICNameMapper()
self.aim = aim_manager.AimManager()
# We don't care about deletion, that is managed by the database layer
# (can't delete a flowclassifier if in use).
for event in [events.PRECOMMIT_UPDATE, events.PRECOMMIT_CREATE]:
registry.subscribe(self._handle_flow_classifier,
sfc_cts.GBP_FLOW_CLASSIFIER, event)
registry.subscribe(self._handle_port_bound, sfc_cts.GBP_PORT,
events.PRECOMMIT_UPDATE)
registry.subscribe(self._handle_net_gbp_change,
sfc_cts.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE)
registry.subscribe(self._handle_net_gbp_change,
sfc_cts.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE)
@property
def plugin(self):
if not self._core_plugin:
self._core_plugin = manager.NeutronManager.get_plugin()
if not self._core_plugin:
LOG.error(_("No Core plugin found."))
raise exc.GroupPolicyDeploymentError()
return self._core_plugin
@property
def flowc_plugin(self):
if not self._flowc_plugin:
plugins = manager.NeutronManager.get_service_plugins()
self._flowc_plugin = plugins.get(flowc_ext.FLOW_CLASSIFIER_EXT)
if not self._flowc_plugin:
LOG.error(_("No FlowClassifier service plugin found."))
raise exc.GroupPolicyDeploymentError()
return self._flowc_plugin
@property
def l3_plugin(self):
if not self._l3_plugin:
plugins = manager.NeutronManager.get_service_plugins()
self._l3_plugin = plugins.get(n_constants.L3)
if not self._l3_plugin:
LOG.error(_("No L3 service plugin found."))
raise exc.GroupPolicyDeploymentError()
return self._l3_plugin
@property
def sfc_plugin(self):
if not self._sfc_plugin:
plugins = manager.NeutronManager.get_service_plugins()
self._sfc_plugin = plugins.get(sfc_ext.SFC_EXT)
if not self._sfc_plugin:
LOG.error(_("No SFC service plugin found."))
raise exc.GroupPolicyDeploymentError()
return self._sfc_plugin
@property
def aim_mech(self):
if not self._aim_mech_driver:
try:
self._aim_mech_driver = (
self.plugin.mechanism_manager.mech_drivers['apic_aim'].obj)
except (KeyError, AttributeError):
LOG.error(_("No AIM driver found"))
raise exc.GroupPolicyDeploymentError()
return self._aim_mech_driver
@property
def aim_flowc(self):
if not self._aim_flowc_driver:
try:
self._aim_flowc_driver = (
self.flowc_plugin.driver_manager.drivers['aim'].obj)
except (KeyError, AttributeError):
LOG.error(_("No AIM flowc driver found"))
raise exc.GroupPolicyDeploymentError()
return self._aim_flowc_driver
def create_port_pair_precommit(self, context):
"""Map Port Pair to AIM model
A Port Pair by itself doesn't need to generate AIM model at least
until added to a Port Pair Group.
:param context:
:return:
"""
self._validate_port_pair(context)
def update_port_pair_precommit(self, context, remap=False):
self._validate_port_pair(context)
p_ctx = context._plugin_context
# Remap the affected groups if needed.
if remap or self._should_regenerate_pp(context):
for group in self._get_groups_by_pair_id(p_ctx,
context.current['id']):
# Curr and original are identical, so the same object gets
# remapped.
g_ctx = sfc_ctx.PortPairGroupContext(context._plugin, p_ctx,
group, group)
self.update_port_pair_group_precommit(g_ctx, remap=True)
def delete_port_pair_precommit(self, context):
# NOTE(ivar): DB layer prevents port pair deletion when in use by a
# port pair group.
pass
def create_port_pair_group_precommit(self, context):
"""Map port pair group to AIM model
A Port Pair Group is the equivalent of a Logical Device in AIM.
:param context:
:return:
"""
self._validate_port_pair_group(context)
def update_port_pair_group_precommit(self, context, remap=False):
self._validate_port_pair_group(context)
# Remap Port Chain if needed
if remap or self._should_regenerate_ppg(context):
for chain in self._get_chains_by_ppg_id(context._plugin_context,
context.current['id']):
c_ctx = sfc_ctx.PortChainContext(
context._plugin, context._plugin_context, chain, chain)
self.update_port_chain_precommit(c_ctx, remap=True)
def delete_port_pair_group_precommit(self, context):
# NOTE(ivar): DB layer prevents deletion when used by port chains
pass
def create_port_chain_precommit(self, context):
pc = context.current
p_ctx = context._plugin_context
flowcs, ppgs = self._get_pc_flowcs_and_ppgs(p_ctx, pc)
self._validate_port_chain(context._plugin_context, context.current,
flowcs, ppgs)
self._map_port_chain(p_ctx, pc, flowcs, ppgs)
def update_port_chain_precommit(self, context, remap=False):
p_ctx = context._plugin_context
flowcs, ppgs = self._get_pc_flowcs_and_ppgs(p_ctx, context.current)
self._validate_port_chain(context._plugin_context, context.current,
flowcs, ppgs)
# Regenerate Port Chain Model
if remap or self._should_regenerate_pc(context):
o_flowcs, o_ppgs = self._get_pc_flowcs_and_ppgs(p_ctx,
context.original)
self._delete_port_chain_mapping(p_ctx, context.original, o_flowcs,
o_ppgs)
self._map_port_chain(p_ctx, context.current, flowcs, ppgs)
def delete_port_chain_precommit(self, context):
p_ctx = context._plugin_context
flowcs, ppgs = self._get_pc_flowcs_and_ppgs(p_ctx, context.current)
self._delete_port_chain_mapping(p_ctx, context.current, flowcs, ppgs)
def _validate_port_pair(self, context):
# Ports need to belong to distinct networks
p_ctx = context._plugin_context
ingress_port = self.plugin.get_port(p_ctx, context.current['ingress'])
egress_port = self.plugin.get_port(p_ctx, context.current['egress'])
ingress_net = ingress_port['network_id']
egress_net = egress_port['network_id']
if ingress_net == egress_net:
raise exceptions.PortPairsSameNetwork(id=context.current['id'])
ingress_dom = self.aim_mech._get_port_unique_domain(p_ctx, ingress_port)
egress_dom = self.aim_mech._get_port_unique_domain(p_ctx, egress_port)
if ingress_dom != egress_dom:
raise exceptions.PortPairsDifferentDomain(id=context.current['id'])
if any(x for x in [ingress_dom, egress_dom] if x == (None, None)):
raise exceptions.PortPairsNoUniqueDomain(id=context.current['id'])
# Ensure the domain type is supported
if ingress_dom[0] not in SUPPORTED_DOM_TYPES:
raise exceptions.PortPairsUnsupportedDomain(
id=context.current['id'], doms=SUPPORTED_DOM_TYPES)
ingress_net = self.plugin.get_network(p_ctx, ingress_net)
egress_net = self.plugin.get_network(p_ctx, egress_net)
if ingress_net.get('apic:svi') or egress_net.get(
'apic:svi') or ingress_net.get(
'router:external') or egress_net.get('router:external'):
raise exceptions.ServiceNetworkBadType(id=context.current['id'])
def _validate_port_pair_group(self, context):
# Verify all ports are in the same network for each side of the
# connection
p_ctx = context._plugin_context
port_pairs = context._plugin.get_port_pairs(
p_ctx, filters={'id': context.current['port_pairs']})
domains = set()
net_pairs = set()
for port_pair in port_pairs:
ingress_port = self.plugin.get_port(p_ctx, port_pair['ingress'])
egress_port = self.plugin.get_port(p_ctx, port_pair['egress'])
domains.add(self.aim_mech._get_port_unique_domain(p_ctx,
ingress_port))
if len(domains) > 1:
raise exceptions.PortPairsInPortPairGroupDifferentDomain(
id=context.current['id'])
net_pairs.add((ingress_port['network_id'],
egress_port['network_id']))
if len(net_pairs) > 1:
raise exceptions.PortPairsDifferentNetworkInGroup(
id=context.current['id'])
def _validate_port_chain(self, p_ctx, pc, flowcs, ppgs):
# - All networks in play (prov, cons, services) are in the same VRF,
# also listen to events to prevent VRF to change after the fact
# alternatively, an ERROR status can be raised
# - TEMPORARY: provider and consumer EPGs are in the same tenant, this
# can be removed once contract export is implemented.
# TODO(ivar): two different chains cannot share left/right networks
# TODO(ivar): right/left BDs same tenant as provider
vrfs = set()
for flowc in flowcs:
provg = self._get_flowc_provider_group(p_ctx, flowc)
consg = self._get_flowc_consumer_group(p_ctx, flowc)
provrf = self._get_flowc_provider_vrf(p_ctx, flowc)
convrf = self._get_flowc_consumer_vrf(p_ctx, flowc)
vrfs.add(provrf.dn)
vrfs.add(convrf.dn)
if provg.tenant_name != consg.tenant_name:
raise exceptions.FlowClassifierSrcDstNetworksDifferentTenants(
id=flowc['id'])
if len(vrfs) > 1:
raise exceptions.MultipleVRFsDetectedInPortChain(id=pc['id'])
nets = set()
net_count = 0
for ppg in ppgs:
lvrf, rvrf = self._get_ppg_left_right_vrfs(p_ctx, ppg)
# REVISIT(ivar): current ACI limitation, we can't put subsequent
# nodes of the same chain in the same networks
lnet, rnet = self._get_ppg_left_right_network_ids(p_ctx, ppg)
nets.add(lnet)
nets.add(rnet)
net_count += 2
vrfs.add(lvrf.dn)
vrfs.add(rvrf.dn)
if len(vrfs) > 1:
raise exceptions.MultipleVRFsDetectedInPortChain(id=pc['id'])
if len(nets) < net_count:
raise exceptions.ConflictingNetworksDetectedInPortChain(
id=pc['id'])
def _map_port_pair_group(self, plugin_context, ppg, tenant):
session = plugin_context.session
aim_ctx = aim_context.AimContext(session)
# Create Logical device model, container for all the PPG port pairs.
dc = self._get_ppg_device_cluster(session, ppg, tenant)
type, domain = self._get_ppg_domain(plugin_context, ppg)
if not type and not domain:
raise exceptions.PortPairsNoUniqueDomain(id=ppg['port_pairs'])
if type == PHYSDOM_TYPE:
dc.device_type = 'PHYSICAL'
dc.physical_domain_name = domain
else:
dc.device_type = 'VIRTUAL'
dc.vmm_domain = [{'type': type, 'name': domain}]
self.aim.create(aim_ctx, dc)
# For each port pair, create the corresponding Concrete Devices
# (represented by the static path of each interface)
ingress_cdis = []
egress_cdis = []
port_pairs = self.sfc_plugin.get_port_pairs(
plugin_context, filters={'id': ppg['port_pairs']})
for pp in port_pairs:
ingress_port = self.plugin.get_port(plugin_context, pp['ingress'])
egress_port = self.plugin.get_port(plugin_context, pp['egress'])
pp_id = self.name_mapper.port_pair(session, pp['id'])
pp_name = aim_utils.sanitize_display_name(ppg['name'])
cd = aim_sg.ConcreteDevice(
tenant_name=dc.tenant_name, device_cluster_name=dc.name,
name=pp_id, display_name=pp_name)
# Create ConcreteDevice
self.aim.create(aim_ctx, cd)
for p, store in [(ingress_port, ingress_cdis),
(egress_port, egress_cdis)]:
p_id = self.name_mapper.port(session, p['id'])
p_name = aim_utils.sanitize_display_name(p['name'])
path, encap = self.aim_mech._get_port_static_path_and_encap(
plugin_context, p)
if path is None:
LOG.warning("Path not found for Port Pair %s member %s ",
"Port might be unbound.", pp['id'], p['id'])
continue
# TODO(ivar): what if encap is None? is that an Opflex port?
# Create Concrete Device Interface
cdi = aim_sg.ConcreteDeviceInterface(
tenant_name=cd.tenant_name,
device_cluster_name=cd.device_cluster_name,
device_name=cd.name, name=p_id, display_name=p_name,
path=path)
cdi = self.aim.create(aim_ctx, cdi)
store.append((cdi, encap, p))
# Ingress and Egress CDIs have the same length.
# All the ingress devices must be load balanced, and so must the
# egress ones (for the reverse path). Create the proper PBR policies
# as well as the Logical Interfaces (which see all the physical
# interfaces of a specific direction as if they were one).
internal_dci = aim_sg.DeviceClusterInterface(
tenant_name=dc.tenant_name, device_cluster_name=dc.name,
name=INGRESS, display_name=INGRESS)
external_dci = aim_sg.DeviceClusterInterface(
tenant_name=dc.tenant_name, device_cluster_name=dc.name,
name=EGRESS, display_name=EGRESS)
# Create 2 PBR rules per PPG, one per direction.
ipbr = self._get_ppg_service_redirect_policy(session, ppg, INGRESS,
tenant)
epbr = self._get_ppg_service_redirect_policy(session, ppg, EGRESS,
tenant)
for i in range(len(ingress_cdis)):
icdi, iencap, iport = ingress_cdis[i]
ecdi, eencap, eport = egress_cdis[i]
internal_dci.encap = iencap
external_dci.encap = eencap
internal_dci.concrete_interfaces.append(icdi.dn)
external_dci.concrete_interfaces.append(ecdi.dn)
if iport['fixed_ips']:
ipbr.destinations.append(
{'ip': iport['fixed_ips'][0]['ip_address'],
'mac': iport['mac_address']})
if eport['fixed_ips']:
epbr.destinations.append(
{'ip': eport['fixed_ips'][0]['ip_address'],
'mac': eport['mac_address']})
self.aim.create(aim_ctx, internal_dci)
self.aim.create(aim_ctx, external_dci)
self.aim.create(aim_ctx, ipbr)
self.aim.create(aim_ctx, epbr)
def _delete_port_pair_group_mapping(self, plugin_context, ppg, tenant):
# Just delete cascade the DeviceCluster and PBR policies
session = plugin_context.session
aim_ctx = aim_context.AimContext(session)
dc = self._get_ppg_device_cluster(session, ppg, tenant)
self.aim.delete(aim_ctx, dc, cascade=True)
for prefix in [PBR_INGR_PREFIX, PBR_EGR_PREFIX]:
pbr_id = self.name_mapper.port_pair_group(session, ppg['id'],
prefix=prefix)
self.aim.delete(
aim_ctx, aim_sg.ServiceRedirectPolicy(
tenant_name=dc.tenant_name, name=pbr_id), cascade=True)
def _map_port_chain(self, plugin_context, pc, flowcs, ppgs):
# Create one DeviceClusterContext per PPG
p_ctx = plugin_context
aim_ctx = aim_context.AimContext(p_ctx.session)
# For each flow classifier, there are as many DeviceClusterContext as
# the number of nodes in the chain.
p_tenants = set()
for flc in flowcs:
p_tenant = self._get_flowc_provider_group(plugin_context,
flc).tenant_name
sg = self._get_pc_service_graph(p_ctx.session, pc, p_tenant)
contract = self._get_flc_contract(p_ctx.session, flc, p_tenant)
subject = aim_resource.ContractSubject(
tenant_name=contract.tenant_name, contract_name=contract.name,
name=sg.name, service_graph_name=sg.name,
bi_filters=[self.aim_mech._any_filter_name])
self.aim.create(aim_ctx, contract)
self.aim.create(aim_ctx, subject)
self._map_flow_classifier(p_ctx, flc, p_tenant)
# Map device clusters for each flow tenant
if p_tenant not in p_tenants:
for ppg in ppgs:
dc = self._get_ppg_device_cluster(p_ctx.session, ppg,
p_tenant)
self._map_port_pair_group(plugin_context, ppg, p_tenant)
dcc = aim_sg.DeviceClusterContext(
tenant_name=sg.tenant_name, contract_name="any",
service_graph_name=sg.name, node_name=dc.name,
display_name=dc.display_name,
device_cluster_name=dc.name,
device_cluster_tenant_name=dc.tenant_name)
dcc = self.aim.create(aim_ctx, dcc)
# Create device context interfaces.
left_bd, right_bd = self._get_ppg_left_right_bds(p_ctx,
ppg)
for conn_name, direction, bd in [
('provider', EGRESS, right_bd),
('consumer', INGRESS, left_bd)]:
dci = aim_sg.DeviceClusterInterface(
tenant_name=dc.tenant_name,
device_cluster_name=dc.name, name=direction)
pbr = self._get_ppg_service_redirect_policy(
p_ctx.session, ppg, direction, p_tenant)
dcic = aim_sg.DeviceClusterInterfaceContext(
tenant_name=dcc.tenant_name,
contract_name=dcc.contract_name,
service_graph_name=dcc.service_graph_name,
node_name=dcc.node_name, connector_name=conn_name,
display_name=dcc.display_name,
bridge_domain_dn=bd.dn,
device_cluster_interface_dn=dci.dn,
service_redirect_policy_dn=pbr.dn)
self.aim.create(aim_ctx, dcic)
sg.linear_chain_nodes.append(
{'name': dc.name, 'device_cluster_name': dc.name,
'device_cluster_tenant_name': dc.tenant_name})
# Unsync left-right EPGs
for epg in self._get_ppg_left_right_epgs(p_ctx, ppg):
self.aim.update(aim_ctx, epg, sync=False)
# Create only once per tenant
self.aim.create(aim_ctx, sg)
p_tenants.add(p_tenant)
def _delete_port_chain_mapping(self, plugin_context, pc, flowcs, ppgs):
p_ctx = plugin_context
session = p_ctx.session
aim_ctx = aim_context.AimContext(session)
deleted_ppgs = set()
for flc in flowcs:
tenant = self._get_flowc_provider_group(plugin_context,
flc).tenant_name
for ppg in ppgs:
key = (tenant, ppg['id'])
if key not in deleted_ppgs:
self._delete_port_pair_group_mapping(p_ctx, ppg, tenant)
deleted_ppgs.add(key)
self._delete_flow_classifier_mapping(p_ctx, flc, tenant)
contract = self._get_flc_contract(p_ctx.session, flc, tenant)
sg = self._get_pc_service_graph(p_ctx.session, pc, tenant)
self.aim.delete(aim_ctx, contract, cascade=True)
self.aim.delete(aim_ctx, sg, cascade=True)
for ppg_id in pc['port_pair_groups']:
ppg_aid = self.name_mapper.port_pair_group(session, ppg_id)
dcc = aim_sg.DeviceClusterContext(
tenant_name=tenant, contract_name="any",
service_graph_name=sg.name, node_name=ppg_aid)
self.aim.delete(aim_ctx, dcc, cascade=True)
processed_networks = set()
# deleted_ppgs contains all the PPGs' IDs
processed_ppgs = deleted_ppgs
for ppg in ppgs:
for net_id in self._get_ppg_left_right_network_ids(p_ctx, ppg):
if net_id in processed_networks:
continue
processed_networks.add(net_id)
# See if there are more chains on these networks
for group_id in self._get_group_ids_by_network_id(p_ctx,
net_id):
if group_id in processed_ppgs:
# Nothing to do
continue
processed_ppgs.add(group_id)
for chain in self._get_chains_by_ppg_id(p_ctx, group_id):
if chain['id'] != pc['id']:
# This network is in use by some chain, cannot
# re-activate EPG
break
else:
# No chain associated to this group ID
continue
break
else:
# No chain associated to all the groups of this network
epg = self.aim_mech._get_epg_by_network_id(p_ctx.session,
net_id)
self.aim.update(aim_ctx, epg, sync=True)
def _map_flow_classifier(self, plugin_context, flowc, tenant):
"""Map flowclassifier to AIM model
If source/destination ports are plugged to external networks, create
AIM external EPGs in the proper L3Outs and set the corresponding
source/destination ip prefix.
:param context:
:return:
"""
aim_ctx = aim_context.AimContext(plugin_context.session)
cons_group = self._get_flowc_consumer_group(plugin_context, flowc)
prov_group = self._get_flowc_provider_group(plugin_context, flowc)
contract = self._get_flc_contract(plugin_context.session, flowc,
tenant)
# TODO(ivar): if provider/consumer are in different tenants, export
# the contract
cons_group.consumed_contract_names.append(contract.name)
prov_group.provided_contract_names.append(contract.name)
self.aim.create(aim_ctx, cons_group, overwrite=True)
self.aim.create(aim_ctx, prov_group, overwrite=True)
def _map_flowc_network_group(self, plugin_context, net, cidr, flowc,
prefix):
flc_aid = self._get_external_group_aim_name(plugin_context, flowc,
prefix)
flc_aname = aim_utils.sanitize_display_name(flowc['name'])
aim_ctx = aim_context.AimContext(plugin_context.session)
cidr = netaddr.IPNetwork(cidr)
l3out = self.aim_mech._get_svi_net_l3out(net)
if l3out:
if cidr.prefixlen == 0:
# Use default External Network
ext_net = self.aim_mech._get_svi_default_external_epg(net)
ext_net_db = self.aim.get(aim_ctx, ext_net)
if not ext_net_db:
raise exceptions.DefaultExternalNetworkNotFound(
id=net['id'])
else:
# Create ExternalNetwork and ExternalSubnet on the proper
# L3Out. Return the External network
ext_net = aim_resource.ExternalNetwork(
tenant_name=l3out.tenant_name, l3out_name=l3out.name,
name=flc_aid, display_name=flc_aname)
ext_sub = aim_resource.ExternalSubnet(
tenant_name=ext_net.tenant_name,
l3out_name=ext_net.l3out_name,
external_network_name=ext_net.name, cidr=str(cidr))
ext_net_db = self.aim.get(aim_ctx, ext_net)
if not ext_net_db:
ext_net_db = self.aim.create(aim_ctx, ext_net)
ext_sub_db = self.aim.get(aim_ctx, ext_sub)
if not ext_sub_db:
self.aim.create(aim_ctx, ext_sub)
return ext_net_db
else:
return self.aim_mech._get_epg_by_network_id(plugin_context.session,
net['id'])
def _delete_flow_classifier_mapping(self, plugin_context, flowc, tenant):
source_net = self._get_flowc_src_network(plugin_context, flowc)
dest_net = self._get_flowc_dst_network(plugin_context, flowc)
self._delete_flowc_network_group_mapping(
plugin_context, source_net, flowc, tenant,
flowc['source_ip_prefix'], FLOWC_SRC)
self._delete_flowc_network_group_mapping(
plugin_context, dest_net, flowc, tenant,
flowc['destination_ip_prefix'], FLOWC_DST)
def _delete_flowc_network_group_mapping(self, plugin_context, net, flowc,
tenant, cidr, prefix=''):
flc_aid = self._get_external_group_aim_name(plugin_context, flowc,
prefix)
flc_aname = aim_utils.sanitize_display_name(flowc['name'])
aim_ctx = aim_context.AimContext(plugin_context.session)
l3out = self.aim_mech._get_svi_net_l3out(net)
cidr = netaddr.IPNetwork(cidr)
epg = None
if l3out:
if cidr.prefixlen != 0:
ext_net = aim_resource.ExternalNetwork(
tenant_name=l3out.tenant_name, l3out_name=l3out.name,
name=flc_aid, display_name=flc_aname)
self.aim.delete(aim_ctx, ext_net, cascade=True)
else:
ext_net = self.aim_mech._get_svi_default_external_epg(net)
epg = self.aim.get(aim_ctx, ext_net)
else:
epg = self.aim.get(aim_ctx, self.aim_mech._get_epg_by_network_id(
plugin_context.session, net['id']))
if epg:
contract = self._get_flc_contract(plugin_context.session, flowc,
tenant)
try:
if prefix == FLOWC_SRC:
epg.consumed_contract_names.remove(contract.name)
else:
epg.provided_contract_names.remove(contract.name)
self.aim.create(aim_ctx, epg, overwrite=True)
except ValueError:
pass
def _get_chains_by_classifier_id(self, plugin_context, flowc_id):
context = plugin_context
with context.session.begin(subtransactions=True):
chain_ids = [x.portchain_id for x in context.session.query(
sfc_db.ChainClassifierAssoc).filter_by(
flowclassifier_id=flowc_id).all()]
return self.sfc_plugin.get_port_chains(plugin_context,
filters={'id': chain_ids})
def _get_chains_by_ppg_id(self, plugin_context, ppg_id):
context = plugin_context
with context.session.begin(subtransactions=True):
chain_ids = [x.portchain_id for x in context.session.query(
sfc_db.ChainGroupAssoc).filter_by(
portpairgroup_id=ppg_id).all()]
return self.sfc_plugin.get_port_chains(plugin_context,
filters={'id': chain_ids})
def _get_groups_by_pair_id(self, plugin_context, pp_id):
# NOTE(ivar): today, port pair can be associated only to one PPG
context = plugin_context
with context.session.begin(subtransactions=True):
pp_db = self.sfc_plugin._get_port_pair(plugin_context, pp_id)
if pp_db and pp_db.portpairgroup_id:
return self.sfc_plugin.get_port_pair_groups(
plugin_context, filters={'id': [pp_db.portpairgroup_id]})
return []
def _get_group_ids_by_network_id(self, plugin_context, network_id):
ports = self.plugin.get_ports(plugin_context,
filters={'network_id': [network_id]})
port_ids = [x['id'] for x in ports]
pps = self.sfc_plugin.get_port_pairs(plugin_context,
filters={'ingress': port_ids})
pps.extend(self.sfc_plugin.get_port_pairs(
plugin_context, filters={'egress': port_ids}))
group_ids = set()
for pp in pps:
pp_db = self.sfc_plugin._get_port_pair(plugin_context, pp['id'])
group_ids.add(pp_db.portpairgroup_id)
return list(group_ids)
def _should_regenerate_pp(self, context):
attrs = [INGRESS, EGRESS, 'name']
return any(context.current[a] != context.original[a] for a in attrs)
def _should_regenerate_ppg(self, context):
attrs = ['port_pairs', 'name']
return any(context.current[a] != context.original[a] for a in attrs)
def _should_regenerate_pc(self, context):
attrs = ['flow_classifiers', 'port_pair_groups', 'name']
return any(context.current[a] != context.original[a] for a in attrs)
def _get_ppg_device_cluster(self, session, ppg, tenant):
tenant_aid = tenant
ppg_aid = self.name_mapper.port_pair_group(session, ppg['id'])
ppg_aname = aim_utils.sanitize_display_name(ppg['name'])
return aim_sg.DeviceCluster(tenant_name=tenant_aid, name=ppg_aid,
display_name=ppg_aname, managed=False)
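
    # For reference: name_mapper.port_pair_group() yields names of the form
    # 'ppg_<ppg_id>' (the name the UTs look up), so each PPG maps to a
    # single unmanaged DeviceCluster in the chain tenant.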
def _get_ppg_domain(self, plugin_context, ppg):
pp = self.sfc_plugin.get_port_pair(plugin_context,
ppg['port_pairs'][0])
ingress_port = self.plugin.get_port(plugin_context, pp['ingress'])
return self.aim_mech._get_port_unique_domain(plugin_context,
ingress_port)
def _get_pc_service_graph(self, session, pc, tenant):
tenant_aid = tenant
pc_aid = self.name_mapper.port_chain(session, pc['id'])
pc_aname = aim_utils.sanitize_display_name(pc['name'])
return aim_sg.ServiceGraph(tenant_name=tenant_aid, name=pc_aid,
display_name=pc_aname)
def _get_flc_contract(self, session, flc, tenant):
tenant_id = tenant
flc_aid = self.name_mapper.flow_classifier(session, flc['id'])
flc_aname = aim_utils.sanitize_display_name(flc['name'])
return aim_resource.Contract(tenant_name=tenant_id, name=flc_aid,
display_name=flc_aname)

    def _get_ppg_service_redirect_policy(self, session, ppg, direction,
                                         tenant):
        if direction == INGRESS:
            prfx = PBR_INGR_PREFIX
        elif direction == EGRESS:
            prfx = PBR_EGR_PREFIX
        else:
            # Fail loudly instead of hitting a NameError on prfx below.
            raise ValueError('direction must be %s or %s, got %s' %
                             (INGRESS, EGRESS, direction))
        dc = self._get_ppg_device_cluster(session, ppg, tenant)
        pbr_id = self.name_mapper.port_pair_group(session, ppg['id'],
                                                  prefix=prfx)
        return aim_sg.ServiceRedirectPolicy(tenant_name=dc.tenant_name,
                                            name=pbr_id)
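
    # For reference: with the ingress/egress prefixes, the redirect policies
    # for a PPG come out as 'ingr_ppg_<ppg_id>' and 'egr_ppg_<ppg_id>'
    # respectively, one PBR policy per traffic direction.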
def _get_ppg_left_right_network_ids(self, plugin_context, ppg):
pps = self.sfc_plugin.get_port_pairs(plugin_context,
filters={'id': ppg['port_pairs']})
for pp in pps:
ingress = self.plugin.get_port(plugin_context, pp['ingress'])
egress = self.plugin.get_port(plugin_context, pp['egress'])
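            # All port pairs in a PPG are validated to share the same
            # network pair, so the first pair is representative.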
return ingress['network_id'], egress['network_id']
def _get_ppg_left_right_epgs(self, plugin_context, ppg):
leftn, rightn = self._get_ppg_left_right_network_ids(plugin_context,
ppg)
ingress_epg = self.aim_mech._get_epg_by_network_id(
plugin_context.session, leftn)
egress_epg = self.aim_mech._get_epg_by_network_id(
plugin_context.session, rightn)
# Every port pair will return the same result
return ingress_epg, egress_epg
def _get_ppg_left_right_bds(self, plugin_context, ppg):
leftn, rightn = self._get_ppg_left_right_network_ids(plugin_context,
ppg)
ingress_bd = self.aim_mech._get_bd_by_network_id(
plugin_context.session, leftn)
egress_bd = self.aim_mech._get_bd_by_network_id(
plugin_context.session, rightn)
# Every port pair will return the same result
return ingress_bd, egress_bd
def _get_ppg_left_right_vrfs(self, plugin_context, ppg):
leftn, rightn = self._get_ppg_left_right_network_ids(plugin_context,
ppg)
leftn = self.plugin.get_network(plugin_context, leftn)
rightn = self.plugin.get_network(plugin_context, rightn)
ingress_vrf = self.aim_mech._get_vrf_by_network(plugin_context.session,
leftn)
egress_vrf = self.aim_mech._get_vrf_by_network(plugin_context.session,
rightn)
# Every port pair will return the same result
return ingress_vrf, egress_vrf
def _handle_flow_classifier(self, rtype, event, trigger, driver_context,
**kwargs):
if event == events.PRECOMMIT_UPDATE:
current = driver_context.current
original = driver_context.original
pctx = driver_context._plugin_context
l7_curr = current['l7_parameters']
l7_orig = original['l7_parameters']
if (any(current[x] != original[x] for x in sfc_cts.AIM_FLC_PARAMS)
or any(l7_curr[x] != l7_orig[x] for x in
sfc_cts.AIM_FLC_L7_PARAMS.keys())):
# reject if in use
for chain in self._get_chains_by_classifier_id(pctx,
current['id']):
raise exceptions.FlowClassifierInUseByAChain(
fields=(sfc_cts.AIM_FLC_L7_PARAMS.keys() +
sfc_cts.AIM_FLC_PARAMS), pc_id=chain['id'])
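
    # Illustrative effect of the guard above: once a classifier is
    # referenced by a port chain, updating any mapped field is rejected,
    # e.g. (pseudo-call):
    #
    #   update_flow_classifier(flowc_id, source_ip_prefix='10.0.0.0/24')
    #   -> FlowClassifierInUseByAChain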
def _handle_port_bound(self, rtype, event, trigger, driver_context,
**kwargs):
if event == events.PRECOMMIT_UPDATE:
context = driver_context
p_ctx = driver_context._plugin_context
c_host = context.host
o_host = context.original_host
if c_host and (c_host != o_host):
pps = self.sfc_plugin.get_port_pairs(
p_ctx, filters={'ingress': [driver_context.current['id']]})
pps.extend(self.sfc_plugin.get_port_pairs(
p_ctx, filters={'egress': [driver_context.current['id']]}))
for pp in pps:
d_ctx = sfc_ctx.PortPairContext(context._plugin, p_ctx, pp,
pp)
self.update_port_pair_precommit(d_ctx, remap=True)
def _handle_net_gbp_change(self, rtype, event, trigger, context,
network_id, **kwargs):
chains = {}
ppg_ids = self._get_group_ids_by_network_id(context, network_id)
flowc_ids = self.aim_flowc._get_classifiers_by_network_id(
context, network_id)
for flowc_id in flowc_ids:
for chain in self._get_chains_by_classifier_id(context,
flowc_id):
chains[chain['id']] = chain
if rtype == sfc_cts.GBP_NETWORK_VRF:
# Don't need to check PPGs if the EPG is changing
for ppg_id in ppg_ids:
for chain in self._get_chains_by_ppg_id(context, ppg_id):
chains[chain['id']] = chain
for chain in chains.values():
flowcs, ppgs = self._get_pc_flowcs_and_ppgs(context, chain)
self._validate_port_chain(context, chain, flowcs, ppgs)
def _get_flowc_src_network(self, plugin_context, flowc):
return self.plugin.get_network(
plugin_context, flowc['l7_parameters'][sfc_cts.LOGICAL_SRC_NET])
def _get_flowc_dst_network(self, plugin_context, flowc):
return self.plugin.get_network(
plugin_context, flowc['l7_parameters'][sfc_cts.LOGICAL_DST_NET])
def _get_pc_flowcs_and_ppgs(self, plugin_context, pc):
flowcs = self.flowc_plugin.get_flow_classifiers(
plugin_context, filters={'id': pc['flow_classifiers']})
unordered_ppgs = self.sfc_plugin.get_port_pair_groups(
plugin_context, filters={'id': pc['port_pair_groups']})
# Keep order
ppgs = []
for ppg_id in pc['port_pair_groups']:
for ppg in unordered_ppgs:
if ppg['id'] == ppg_id:
ppgs.append(ppg)
break
return flowcs, ppgs
def _get_flowc_provider_group(self, plugin_context, flowc):
aim_ctx = aim_context.AimContext(plugin_context.session)
net = self._get_flowc_dst_network(plugin_context, flowc)
return self.aim.get(aim_ctx, self._map_flowc_network_group(
plugin_context, net, flowc['destination_ip_prefix'], flowc,
FLOWC_DST))
def _get_flowc_consumer_group(self, plugin_context, flowc):
aim_ctx = aim_context.AimContext(plugin_context.session)
net = self._get_flowc_src_network(plugin_context, flowc)
return self.aim.get(aim_ctx, self._map_flowc_network_group(
plugin_context, net, flowc['source_ip_prefix'], flowc, FLOWC_SRC))
def _get_flowc_provider_vrf(self, plugin_context, flowc):
net = self._get_flowc_dst_network(plugin_context, flowc)
return self.aim_mech._get_vrf_by_network(plugin_context.session, net)
def _get_flowc_consumer_vrf(self, plugin_context, flowc):
net = self._get_flowc_src_network(plugin_context, flowc)
return self.aim_mech._get_vrf_by_network(plugin_context.session, net)
def _get_external_group_aim_name(self, plugin_context, flowc, prefix):
if prefix == FLOWC_SRC:
cidr = flowc['source_ip_prefix']
net = self._get_flowc_src_network(plugin_context, flowc)
else:
cidr = flowc['destination_ip_prefix']
net = self._get_flowc_dst_network(plugin_context, flowc)
cidr = aim_utils.sanitize_display_name(cidr)
return self.name_mapper.network(plugin_context.session, net['id'],
prefix=cidr + '_')
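
    # Example (illustrative): for a source prefix 192.168.0.0/24 on network
    # <net_id>, the sanitized CIDR is '192.168.0.0_24' and the resulting
    # external group name is '192.168.0.0_24_net_<net_id>', matching what
    # the UTs assert for SVI networks.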

View File

@@ -22,6 +22,7 @@ from neutron import context
from neutron import manager
from neutron.plugins.common import constants
from neutron import policy
from neutron.services.trunk.rpc import server as trunk_server
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2
from oslo_utils import importutils
@@ -36,6 +37,8 @@ from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_constants)
import gbpservice.neutron.tests
from gbpservice.neutron.tests.unit import common as cm
from networking_sfc.extensions import flowclassifier
from networking_sfc.extensions import sfc
JSON_FORMAT = 'json'
@@ -78,34 +81,33 @@ class ApiManagerMixin(object):
            sorted([i[resource]['id'] for i in items]))

    def _create_resource(self, type, expected_res_status=None,
-                         is_admin_context=False, **kwargs):
+                         is_admin_context=False, deserialize=True, **kwargs):
        plural = cm.get_resource_plural(type)
-        defaults_func = getattr(cm, 'get_create_%s_default_attrs' % type,
-                                None)
-        defaults = {}
-        if defaults_func:
-            defaults = defaults_func()
-        defaults.update(kwargs)
+        type = type.split('/')[-1]
+        try:
+            defaults = getattr(cm, 'get_create_%s_default_attrs' % type)()
+            defaults.update(kwargs)
+        except AttributeError:
+            defaults = kwargs
        data = {type: {'tenant_id': self._tenant_id}}
        data[type].update(defaults)
        req = self.new_create_request(plural, data, self.fmt)
        req.environ['neutron.context'] = context.Context(
            '', kwargs.get('tenant_id', self._tenant_id) if not
            is_admin_context else self._tenant_id, is_admin_context)
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
-        elif res.status_int >= webob.exc.HTTPClientError.code:
+        elif deserialize and res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(self.fmt, res)
+        return self.deserialize(self.fmt, res) if deserialize else res

-    def _update_resource(
-            self, id, type, expected_res_status=None, is_admin_context=False,
-            api=None, **kwargs):
+    def _update_resource(self, id, type, expected_res_status=None,
+                         is_admin_context=False, api=None, deserialize=True,
+                         **kwargs):
        plural = cm.get_resource_plural(type)
        type = type.split('/')[-1]
        data = {type: kwargs}
        tenant_id = kwargs.pop('tenant_id', self._tenant_id)
        # Create PT with bound port
@@ -117,12 +119,13 @@ class ApiManagerMixin(object):
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
-        elif res.status_int >= webob.exc.HTTPClientError.code:
+        elif deserialize and res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(self.fmt, res)
+        return self.deserialize(self.fmt, res) if deserialize else res

    def _show_resource(self, id, plural, expected_res_status=None,
-                       is_admin_context=False, tenant_id=None):
+                       is_admin_context=False, tenant_id=None,
+                       deserialize=True):
        req = self.new_show_request(plural, id, fmt=self.fmt)
        req.environ['neutron.context'] = context.Context(
            '', tenant_id or self._tenant_id, is_admin_context)
@@ -130,22 +133,23 @@ class ApiManagerMixin(object):
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
-        elif res.status_int >= webob.exc.HTTPClientError.code:
+        elif deserialize and res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
-        return self.deserialize(self.fmt, res)
+        return self.deserialize(self.fmt, res) if deserialize else res

    def _delete_resource(self, id, plural, is_admin_context=False,
-                         expected_res_status=None, tenant_id=None):
+                         expected_res_status=None, tenant_id=None,
+                         deserialize=True):
        req = self.new_delete_request(plural, id)
        req.environ['neutron.context'] = context.Context(
            '', tenant_id or self._tenant_id, is_admin_context)
        res = req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status, res.status_int)
-        elif res.status_int >= webob.exc.HTTPClientError.code:
+        elif deserialize and res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        if res.status_int != 204:
-            return self.deserialize(self.fmt, res)
+            return self.deserialize(self.fmt, res) if deserialize else res
def _get_object(self, type, id, api, expected_res_status=None):
req = self.new_show_request(type, id, self.fmt)
@@ -187,6 +191,14 @@ class ApiManagerMixin(object):
self.fmt)
return self.deserialize(self.fmt, req.get_response(self.api))
def _bind_subport(self, ctx, trunk, port):
with mock.patch.object(trunk_server.TrunkSkeleton, '__init__',
return_value=None):
trunk_skeleton = trunk_server.TrunkSkeleton()
port['trunk_id'] = trunk['trunk']['id']
port['port_id'] = port['id']
trunk_skeleton.update_subport_bindings(ctx, [port])
def _unbind_port(self, port_id):
data = {'port': {'binding:host_id': ''}}
req = self.new_update_request('ports', data, port_id,
@@ -214,15 +226,30 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
        def _is_gbp_resource(plural):
            return plural in gpolicy.RESOURCE_ATTRIBUTE_MAP

        def _is_sfc_resource(plural):
            return plural in sfc.RESOURCE_ATTRIBUTE_MAP

        def _is_flowc_resource(plural):
            return plural in flowclassifier.RESOURCE_ATTRIBUTE_MAP

        def _is_valid_resource(plural):
-            return _is_gbp_resource(plural) or _is_sc_resource(plural)
+            return (_is_gbp_resource(plural) or _is_sc_resource(plural) or
+                    _is_flowc_resource(plural) or _is_sfc_resource(plural))
def _get_prefix(plural):
if _is_flowc_resource(plural) or _is_sfc_resource(plural):
return 'sfc/'
return ''
# Update Method
if item.startswith('update_'):
resource = item[len('update_'):]
plural = cm.get_resource_plural(resource)
if _is_valid_resource(plural):
r = _get_prefix(plural) + resource
def update_wrapper(id, **kwargs):
-                    return self._update_resource(id, resource, **kwargs)
+                    return self._update_resource(id, r, **kwargs)
return update_wrapper
# Show Method
if item.startswith('show_'):
@@ -230,7 +257,8 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
plural = cm.get_resource_plural(resource)
if _is_valid_resource(plural):
def show_wrapper(id, **kwargs):
-                    return self._show_resource(id, plural, **kwargs)
+                    p = _get_prefix(plural) + plural
+                    return self._show_resource(id, p, **kwargs)
return show_wrapper
# Create Method
if item.startswith('create_'):
@@ -238,7 +266,8 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
plural = cm.get_resource_plural(resource)
if _is_valid_resource(plural):
def create_wrapper(**kwargs):
-                    return self._create_resource(resource, **kwargs)
+                    r = _get_prefix(plural) + resource
+                    return self._create_resource(r, **kwargs)
return create_wrapper
# Delete Method
if item.startswith('delete_'):
@@ -246,7 +275,8 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
plural = cm.get_resource_plural(resource)
if _is_valid_resource(plural):
def delete_wrapper(id, **kwargs):
-                    return self._delete_resource(id, plural, **kwargs)
+                    p = _get_prefix(plural) + plural
+                    return self._delete_resource(id, p, **kwargs)
return delete_wrapper
raise AttributeError
@@ -350,6 +380,9 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
'gp_plugin_name': gp_plugin,
'sc_plugin_name': sc_plugin}
# Always install SFC plugin for convenience
service_plugins['sfc_plugin_name'] = 'sfc'
service_plugins['flowc_plugin_name'] = 'flow_classifier'
extensions.append_api_extensions_path(
gbpservice.neutron.extensions.__path__)
super(GroupPolicyDbTestCase, self).setUp(

View File

@@ -180,7 +180,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
        vm.name = 'someid'
        nova_client.return_value = vm
-        self.extension_attributes = ('router:external', DN,
+        self.extension_attributes = ('router:external', DN, 'apic:svi',
'apic:nat_type', 'apic:snat_host_pool',
CIDR, PROV, CONS)
# REVISIT: Note that the aim_driver sets create_auto_ptg to
@ -300,6 +300,15 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
return self.deserialize(self.fmt,
req.get_response(self.api))['subnet']
def _show_port(self, id):
req = self.new_show_request('ports', id, fmt=self.fmt)
return self.deserialize(self.fmt, req.get_response(self.api))['port']
def _show_network(self, id):
req = self.new_show_request('networks', id, fmt=self.fmt)
return self.deserialize(self.fmt,
req.get_response(self.api))['network']
def _show_subnetpool(self, id):
req = self.new_show_request('subnetpools', id, fmt=self.fmt)
return self.deserialize(self.fmt,

View File

@@ -0,0 +1,981 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aim.api import infra as aim_infra
from aim.api import resource as aim_res
from aim.api import service_graph as aim_sg
import mock
from networking_sfc.extensions import flowclassifier as flowc_ext
from networking_sfc.extensions import sfc as sfc_ext
from networking_sfc.services.flowclassifier.common import config as flc_cfg
from networking_sfc.services.flowclassifier import driver_manager as fc_driverm
from networking_sfc.services.sfc.common import config as sfc_cfg
from networking_sfc.services.sfc import driver_manager as sfc_driverm
from neutron.callbacks import exceptions as c_exc
from neutron import context
from neutron import manager
from oslo_log import log as logging
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_aim_mapping_driver as test_aim_base)
LOG = logging.getLogger(__name__)
class Rollback(Exception):
pass
class TestAIMServiceFunctionChainingBase(test_aim_base.AIMBaseTestCase):
def setUp(self, *args, **kwargs):
sfc_cfg.cfg.CONF.set_override('drivers', ['aim'], group='sfc')
flc_cfg.cfg.CONF.set_override('drivers', ['aim'],
group='flowclassifier')
config.cfg.CONF.set_override(
'network_vlan_ranges', ['physnet1:100:200'], group='ml2_type_vlan')
ml2_options = {'mechanism_drivers': ['apic_aim', 'openvswitch'],
'extension_drivers': ['apic_aim', 'port_security',
'dns'],
'type_drivers': ['opflex', 'local', 'vlan'],
'tenant_network_types': ['vlan']}
# NOTE(ivar): the SFC and FLC driver managers load the driver names in
# the default parameters of their INIT functions. In Python, default
# params are evaluated only once when the module is loaded hence
# causing issues in the tests if those modules ever get loaded before
# the aim override happens. We need to reload the modules at this point
# to fix the issue.
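        # A minimal illustration of that pitfall (hypothetical code):
        #
        #   def initialize(drivers=cfg.CONF.sfc.drivers):  # bound at
        #       ...                                        # import time
        #
        # A set_override() issued afterwards is invisible to 'drivers'
        # until the defining module is reloaded.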
reload(fc_driverm)
reload(sfc_driverm)
super(TestAIMServiceFunctionChainingBase, self).setUp(
*args, ml2_options=ml2_options, trunk_plugin='trunk', **kwargs)
self.agent_conf = test_group_policy_db.AGENT_CONF
self._sfc_driver = None
self._flowc_driver = None
self._sfc_plugin = None
self._flowc_plugin = None
self._aim_mech_driver = None
self.hlink1 = aim_infra.HostLink(host_name='h1', interface_name='eth0',
path='topology/pod-1/paths-101/pathep-[eth1/1]')
self.hlink2 = aim_infra.HostLink(host_name='h2', interface_name='eth0',
path='topology/pod-1/paths-102/pathep-[eth1/1]')
self.path_by_host = {'h1': 'topology/pod-1/paths-101/pathep-[eth1/1]',
'h2': 'topology/pod-1/paths-102/pathep-[eth1/1]'}
self.physdom = aim_res.PhysicalDomain(name='sfc-phys', monitored=True)
self.aim_mgr.create(self._aim_context, self.hlink1)
self.aim_mgr.create(self._aim_context, self.hlink2)
self.aim_mgr.create(self._aim_context, self.physdom)
self.aim_mgr.create(self._aim_context,
aim_infra.HostDomainMappingV2(
host_name='h1', domain_name=self.physdom.name,
domain_type='PhysDom'))
self.aim_mgr.create(self._aim_context,
aim_infra.HostDomainMappingV2(
host_name='h2', domain_name=self.physdom.name,
domain_type='PhysDom'))
self._plugin = manager.NeutronManager.get_plugin()
self._plugin.remove_networks_from_down_agents = mock.Mock()
self._plugin.is_agent_down = mock.Mock(return_value=False)
self._ctx = context.get_admin_context()
def tearDown(self):
LOG.warning("SFCDs used in this test: %s",
self.sfc_plugin.driver_manager.drivers.keys())
LOG.warning("FLCDs used in this test: %s",
self.flowc_plugin.driver_manager.drivers.keys())
        # Always reset the configuration to the dummy driver. Any test
        # that needs a different driver should configure it in its own
        # setUp() (and ideally reset it afterwards).
config.cfg.CONF.set_override('drivers', ['dummy'], group='sfc')
config.cfg.CONF.set_override('drivers', ['dummy'],
group='flowclassifier')
super(TestAIMServiceFunctionChainingBase, self).tearDown()
@property
def sfc_plugin(self):
if not self._sfc_plugin:
plugins = manager.NeutronManager.get_service_plugins()
self._sfc_plugin = plugins.get(sfc_ext.SFC_EXT)
return self._sfc_plugin
@property
def flowc_plugin(self):
if not self._flowc_plugin:
plugins = manager.NeutronManager.get_service_plugins()
self._flowc_plugin = plugins.get(flowc_ext.FLOW_CLASSIFIER_EXT)
return self._flowc_plugin
@property
def sfc_driver(self):
# aim_mapping policy driver reference
if not self._sfc_driver:
self._sfc_driver = (
self.sfc_plugin.driver_manager.drivers['aim'].obj)
return self._sfc_driver
@property
def flowc_driver(self):
# aim_mapping policy driver reference
if not self._flowc_driver:
self._flowc_driver = (
self.flowc_plugin.driver_manager.drivers['aim'].obj)
return self._flowc_driver
@property
def aim_mech(self):
if not self._aim_mech_driver:
self._aim_mech_driver = (
self._plugin.mechanism_manager.mech_drivers['apic_aim'].obj)
return self._aim_mech_driver
def _create_simple_ppg(self, pairs=2, leftn_id=None, rightn_id=None):
nets = []
# Pairs go in 2 networks
if not leftn_id or not rightn_id:
for i in range(2):
net = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net, '192.168.%s.1' % i,
'192.168.%s.0/24' % i)
nets.append(net['network']['id'])
else:
nets = [leftn_id, rightn_id]
port_pairs = []
for i in range(pairs):
p1 = self._make_port(self.fmt, nets[0])['port']
self._bind_port_to_host(p1['id'], 'h%s' % ((i % 2) + 1))
p2 = self._make_port(self.fmt, nets[1])['port']
self._bind_port_to_host(p2['id'], 'h%s' % ((i % 2) + 1))
pp = self.create_port_pair(ingress=p1['id'], egress=p2['id'],
expected_res_status=201)['port_pair']
port_pairs.append(pp)
# This goes through
return self.create_port_pair_group(
port_pairs=[pp['id'] for pp in port_pairs],
expected_res_status=201)['port_pair_group']
def _create_simple_flowc(self, src_svi=False, dst_svi=False):
kwargs = {}
def get_svi_kwargs():
return {'apic:svi': True}
if src_svi:
# We need to create the L3Out and the External network
kwargs = get_svi_kwargs()
net1 = self._make_network(self.fmt, 'net1', True,
arg_list=self.extension_attributes,
**kwargs)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
kwargs = {}
if dst_svi:
kwargs = get_svi_kwargs()
net2 = self._make_network(self.fmt, 'net2', True,
arg_list=self.extension_attributes,
**kwargs)
self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
return self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24',
expected_res_status=201)['flow_classifier']
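
    # The returned classifier looks roughly like this (illustrative):
    #
    #   {'id': <uuid>,
    #    'source_ip_prefix': '192.168.0.0/24',
    #    'destination_ip_prefix': '192.168.1.0/24',
    #    'l7_parameters': {'logical_source_network': <net1 id>,
    #                      'logical_destination_network': <net2 id>}}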
def _create_simple_port_chain(self, flowcs=1, ppgs=2, flowcs_args=None,
ppg_args=None):
flowc_ids = []
ppg_args = ppg_args or []
flowcs_args = flowcs_args or []
for i in range(flowcs):
try:
flowc_ids.append(
self._create_simple_flowc(**flowcs_args[i])['id'])
except IndexError:
flowc_ids.append(self._create_simple_flowc()['id'])
ppg_ids = []
for i in range(ppgs):
try:
ppg_ids.append(self._create_simple_ppg(**ppg_args[i])['id'])
except IndexError:
ppg_ids.append(self._create_simple_ppg()['id'])
return self.create_port_chain(port_pair_groups=ppg_ids,
flow_classifiers=flowc_ids,
expected_res_status=201)['port_chain']
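
    # Typical usage (illustrative): a chain with two classifiers and three
    # PPGs of growing size:
    #
    #   pc = self._create_simple_port_chain(
    #       flowcs=2, ppgs=3,
    #       ppg_args=[{'pairs': 1}, {'pairs': 2}, {'pairs': 3}])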
def _verify_ppg_mapping(self, ppg, tenant):
apic_tn = tenant
# Verify expected AIM model
ctx = self._aim_context
# DeviceCluster. Only one created
dc = self.aim_mgr.get(ctx, aim_sg.DeviceCluster(
tenant_name=apic_tn, name='ppg_' + ppg['id']))
self.assertIsNotNone(dc)
self.assertEqual(self.physdom.name, dc.physical_domain_name)
pps = [self.show_port_pair(x)['port_pair'] for x in ppg['port_pairs']]
for pp in pps:
self.assertIsNotNone(self.aim_mgr.get(
ctx, aim_sg.ConcreteDevice(tenant_name=apic_tn,
device_cluster_name=dc.name,
name='pp_' + pp['id'])))
for pp in pps:
# Each of these CD have 2 CDIs
iprt = self._show_port(pp['ingress'])
eprt = self._show_port(pp['egress'])
pp_dcis = self.aim_mgr.find(
ctx, aim_sg.ConcreteDeviceInterface, tenant_name=apic_tn,
device_cluster_name=dc.name, device_name='pp_' + pp['id'])
self.assertEqual(self.path_by_host[iprt['binding:host_id']],
pp_dcis[0].path)
self.assertEqual(self.path_by_host[eprt['binding:host_id']],
pp_dcis[1].path)
iepg = self.aim_mech._get_epg_by_network_id(self._ctx.session,
iprt['network_id'])
eepg = self.aim_mech._get_epg_by_network_id(self._ctx.session,
eprt['network_id'])
self.assertFalse(self.aim_mgr.get(ctx, iepg).sync)
self.assertFalse(self.aim_mgr.get(ctx, eepg).sync)
        # 1 PPG means 1 service, which has 2 DeviceClusterInterfaces
        # comprising all the ConcreteDeviceInterfaces above
idci = self.aim_mgr.get(ctx, aim_sg.DeviceClusterInterface(
tenant_name=dc.tenant_name, device_cluster_name=dc.name,
name='ingress'))
edci = self.aim_mgr.get(ctx, aim_sg.DeviceClusterInterface(
tenant_name=dc.tenant_name, device_cluster_name=dc.name,
name='egress'))
self.assertIsNotNone(idci)
self.assertIsNotNone(edci)
# Ingress CDIs
ingr_cdis = [
self.aim_mgr.get(
ctx, aim_sg.ConcreteDeviceInterface(
tenant_name=apic_tn, device_cluster_name=dc.name,
device_name='pp_' + pp['id'], name='prt_' + pp['ingress']))
for pp in pps]
self.assertEqual({ingr.dn for ingr in ingr_cdis},
set(idci.concrete_interfaces))
# Egress CDIs
egr_cdis = [
self.aim_mgr.get(
ctx, aim_sg.ConcreteDeviceInterface(
tenant_name=apic_tn, device_cluster_name=dc.name,
device_name='pp_' + pp['id'], name='prt_' + pp['egress']))
for pp in pps]
self.assertEqual({egr.dn for egr in egr_cdis},
set(edci.concrete_interfaces))
# Redirect Policy Ingress
irp = self.aim_mgr.get(ctx, aim_sg.ServiceRedirectPolicy(
tenant_name=dc.tenant_name, name='ingr_ppg_' + ppg['id']))
erp = self.aim_mgr.get(ctx, aim_sg.ServiceRedirectPolicy(
tenant_name=dc.tenant_name, name='egr_ppg_' + ppg['id']))
self.assertIsNotNone(irp)
self.assertIsNotNone(erp)
# Ingress Ports
iprts = [self._show_port(pp['ingress']) for pp in pps]
self.assertEqual(
sorted([{'ip': iprt['fixed_ips'][0]['ip_address'],
'mac': iprt['mac_address']} for iprt in iprts]),
irp.destinations)
eprts = [self._show_port(pp['egress']) for pp in pps]
self.assertEqual(
sorted([{'ip': eprt['fixed_ips'][0]['ip_address'],
'mac': eprt['mac_address']} for eprt in eprts]),
erp.destinations)
def _verify_pc_mapping(self, pc):
ctx = self._aim_context
flowcs = [self.show_flow_classifier(x)['flow_classifier'] for x in
pc['flow_classifiers']]
flowc_tenants = set([self._show_network(
flowc['l7_parameters']['logical_destination_network'])['tenant_id']
for flowc in flowcs])
ppgs = [self.show_port_pair_group(x)['port_pair_group'] for x in
pc['port_pair_groups']]
self.assertEqual(
len(flowcs), len(self.aim_mgr.find(ctx, aim_res.Contract)))
self.assertEqual(
len(flowcs), len(self.aim_mgr.find(ctx, aim_res.ContractSubject)))
self.assertEqual(
len(flowc_tenants) * len(ppgs),
len(self.aim_mgr.find(ctx, aim_sg.DeviceClusterContext)))
self.assertEqual(
len(flowc_tenants) * len(ppgs) * 2,
len(self.aim_mgr.find(ctx, aim_sg.DeviceClusterInterfaceContext)))
self.assertEqual(
len(flowc_tenants),
len(self.aim_mgr.find(ctx, aim_sg.ServiceGraph)))
for flowc in flowcs:
src_net = self._show_network(
flowc['l7_parameters']['logical_source_network'])
dst_net = self._show_network(
flowc['l7_parameters']['logical_destination_network'])
apic_tn = 'prj_' + dst_net['tenant_id']
device_clusters = []
sg = self.aim_mgr.get(ctx, aim_sg.ServiceGraph(
tenant_name=apic_tn, name='ptc_' + pc['id']))
self.assertIsNotNone(sg)
# Verify Flow Classifier mapping
contract = self.aim_mgr.get(
ctx, aim_res.Contract(tenant_name=apic_tn,
name='flc_' + flowc['id']))
self.assertIsNotNone(contract)
subject = self.aim_mgr.get(
ctx, aim_res.ContractSubject(
tenant_name=apic_tn, contract_name='flc_' + flowc['id'],
name='ptc_' + pc['id']))
self.assertIsNotNone(subject)
self.assertEqual(['openstack_AnyFilter'], subject.bi_filters)
            src_cidr = flowc['source_ip_prefix']
            dst_cidr = flowc['destination_ip_prefix']
            for net, pref, cidr in [(src_net, 'src_', src_cidr),
                                    (dst_net, 'dst_', dst_cidr)]:
if net['apic:svi']:
# TODO(ivar): this will not work, there's no L3Outside
# DN extension for external networks.
ext = aim_res.ExternalNetwork.from_dn(
net['apic:distinguished_names']['ExternalNetwork'])
if cidr in ['0.0.0.0/0', '::/0']:
# use default external EPG
ext_net = self.aim_mgr.get(ctx, ext)
else:
ext_net = self.aim_mgr.get(
ctx, aim_res.ExternalNetwork(
tenant_name=ext.tenant_name,
l3out_name=ext.l3out_name,
name=cidr.replace(
'/', '_') + '_' + 'net_' + net['id']))
ext_sub = self.aim_mgr.get(ctx, aim_res.ExternalSubnet(
tenant_name=ext.tenant_name, l3out_name=ext.l3out_name,
external_network_name=ext_net.name, cidr=cidr))
self.assertIsNotNone(ext_net)
self.assertIsNotNone(ext_sub)
self.assertTrue(
contract.name in (ext_net.consumed_contract_names if
pref == 'src_' else
ext_net.provided_contract_names))
else:
epg = self.aim_mgr.get(
ctx, aim_res.EndpointGroup.from_dn(
net['apic:distinguished_names']['EndpointGroup']))
self.assertTrue(
contract.name in (epg.consumed_contract_names if
pref == 'src_' else
epg.provided_contract_names))
for ppg in ppgs:
self._verify_ppg_mapping(ppg, apic_tn)
device_cluster = self.aim_mgr.get(
ctx, aim_sg.DeviceCluster(tenant_name=apic_tn,
name='ppg_' + ppg['id']))
device_clusters.append(device_cluster)
dcc = self.aim_mgr.get(
ctx, aim_sg.DeviceClusterContext(
tenant_name=sg.tenant_name,
contract_name="any",
service_graph_name=sg.name,
node_name=device_cluster.name))
self.assertIsNotNone(dcc)
self.assertEqual(device_cluster.name, dcc.device_cluster_name)
self.assertEqual(apic_tn, dcc.device_cluster_tenant_name)
# Get ingress/egress BD
pp = self.show_port_pair(ppg['port_pairs'][0])['port_pair']
ingress_net = self._get_port_network(pp['ingress'])
egress_net = self._get_port_network(pp['egress'])
ingress_bd = ingress_net[
'apic:distinguished_names']['BridgeDomain']
egress_bd = egress_net[
'apic:distinguished_names']['BridgeDomain']
dci = aim_sg.DeviceClusterInterface(
tenant_name=device_cluster.tenant_name,
device_cluster_name=device_cluster.name, name='ingress')
dcic = aim_sg.DeviceClusterInterfaceContext(
tenant_name=apic_tn, contract_name="any",
service_graph_name=sg.name, node_name=device_cluster.name,
connector_name='consumer')
dcic = self.aim_mgr.get(ctx, dcic)
self.assertIsNotNone(dcic)
self.assertEqual(ingress_bd, dcic.bridge_domain_dn)
self.assertEqual(dci.dn, dcic.device_cluster_interface_dn)
self.assertNotEqual('', dcic.service_redirect_policy_dn)
dci = aim_sg.DeviceClusterInterface(
tenant_name=device_cluster.tenant_name,
device_cluster_name=device_cluster.name, name='egress')
dcic = aim_sg.DeviceClusterInterfaceContext(
tenant_name=apic_tn, contract_name="any",
service_graph_name=sg.name, node_name=device_cluster.name,
connector_name='provider')
dcic = self.aim_mgr.get(ctx, dcic)
self.assertIsNotNone(dcic)
self.assertEqual(egress_bd, dcic.bridge_domain_dn)
self.assertEqual(dci.dn, dcic.device_cluster_interface_dn)
self.assertNotEqual('', dcic.service_redirect_policy_dn)
self.assertEqual(
sorted({'name': x.name, 'device_cluster_name': x.name,
'device_cluster_tenant_name': x.tenant_name}
for x in device_clusters),
sorted(sg.linear_chain_nodes))
def _verify_pc_delete(self, pc):
ctx = self._aim_context
self.delete_port_chain(pc['id'])
# PC and Flowc unmapped
self.assertEqual([], self.aim_mgr.find(ctx, aim_res.Contract))
self.assertEqual([], self.aim_mgr.find(ctx, aim_res.ContractSubject))
self.assertEqual(
[], self.aim_mgr.find(ctx, aim_sg.DeviceClusterContext))
self.assertEqual(
[], self.aim_mgr.find(ctx, aim_sg.DeviceClusterInterfaceContext))
self.assertEqual([], self.aim_mgr.find(ctx, aim_sg.ServiceGraph))
# PPGs unmapped
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ServiceRedirectPolicy)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ConcreteDeviceInterface)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ConcreteDevice)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.DeviceCluster)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.DeviceClusterInterface)))
ppgs = [self.show_port_pair_group(x)['port_pair_group'] for x in
pc['port_pair_groups']]
for ppg in ppgs:
pps = [self.show_port_pair(x)['port_pair'] for x in
ppg['port_pairs']]
for pp in pps:
iprt = self._show_port(pp['ingress'])
eprt = self._show_port(pp['egress'])
iepg = self.aim_mech._get_epg_by_network_id(self._ctx.session,
iprt['network_id'])
eepg = self.aim_mech._get_epg_by_network_id(self._ctx.session,
eprt['network_id'])
self.assertTrue(self.aim_mgr.get(ctx, iepg).sync)
self.assertTrue(self.aim_mgr.get(ctx, eepg).sync)
def _delete_network(self, network_id):
req = self.new_delete_request('networks', network_id)
return req.get_response(self.api)
class TestPortPair(TestAIMServiceFunctionChainingBase):
def test_port_pair_validation(self):
net1 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
p1 = self._make_port(self.fmt, net1['network']['id'])['port']
net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
p2 = self._make_port(self.fmt, net2['network']['id'])['port']
self._bind_port_to_host(p1['id'], 'h1')
self._bind_port_to_host(p2['id'], 'h2')
self.create_port_pair(ingress=p1['id'], egress=p2['id'],
expected_res_status=201)
# Same network ports
p3 = self._make_port(self.fmt, net2['network']['id'])['port']
p4 = self._make_port(self.fmt, net2['network']['id'])['port']
self._bind_port_to_host(p3['id'], 'h1')
self._bind_port_to_host(p4['id'], 'h2')
self.create_port_pair(ingress=p3['id'], egress=p4['id'],
expected_res_status=500)
        # Unbound ports can't be used (no APIC domain can be retrieved)
p5 = self._make_port(self.fmt, net1['network']['id'])['port']
self.create_port_pair(ingress=p3['id'], egress=p5['id'],
expected_res_status=400)
# Ports with no domain
def test_port_pair_validation_no_domain(self):
self.aim_mgr.delete(self._aim_context, self.physdom)
net1 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
p1 = self._make_port(self.fmt, net1['network']['id'])['port']
p2 = self._make_port(self.fmt, net2['network']['id'])['port']
self._bind_port_to_host(p1['id'], 'h1')
# H3 has no domain specified
self._bind_port_to_host(p2['id'], 'h3')
self.create_port_pair(ingress=p1['id'], egress=p2['id'],
expected_res_status=500)
# Both ports no domain
p3 = self._make_port(self.fmt, net1['network']['id'])['port']
self._bind_port_to_host(p3['id'], 'h4')
self.create_port_pair(ingress=p3['id'], egress=p2['id'],
expected_res_status=500)
# Add domain, but different than H1
pd = self.aim_mgr.create(
self._aim_context, aim_infra.HostDomainMappingV2(
host_name='h3', domain_name='diff-name',
domain_type='PhysDom'))
self.create_port_pair(ingress=p1['id'], egress=p2['id'],
expected_res_status=500)
# Multi domain per host
self.aim_mgr.create(self._aim_context, aim_infra.HostDomainMappingV2(
host_name='h3', domain_name=self.physdom.name,
domain_type='PhysDom'))
self.create_port_pair(ingress=p1['id'], egress=p2['id'],
expected_res_status=500)
# Delete extra domain
self.aim_mgr.delete(self._aim_context, pd)
self.create_port_pair(ingress=p1['id'], egress=p2['id'],
expected_res_status=201)
def test_port_pair_validation_trunk(self):
net1 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        snet1 = self._make_network(self.fmt, 'snet1', True)
        self._make_subnet(self.fmt, snet1, '192.167.0.1', '192.167.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
        snet2 = self._make_network(self.fmt, 'snet2', True)
        self._make_subnet(self.fmt, snet2, '192.167.1.1', '192.167.1.0/24')
p1 = self._make_port(self.fmt, net1['network']['id'])['port']
p2 = self._make_port(self.fmt, net2['network']['id'])['port']
sp1 = self._make_port(self.fmt, snet1['network']['id'])['port']
sp2 = self._make_port(self.fmt, snet2['network']['id'])['port']
trunk1 = self._create_resource('trunk', port_id=p1['id'])
trunk2 = self._create_resource('trunk', port_id=p2['id'])
self._bind_port_to_host(p1['id'], 'h1')
self._bind_port_to_host(p2['id'], 'h2')
self._bind_subport(self._ctx, trunk1, sp1)
self._bind_subport(self._ctx, trunk2, sp2)
self.driver._trunk_plugin.add_subports(
self._ctx, trunk1['trunk']['id'],
{'sub_ports': [{'port_id': sp1['id'],
'segmentation_type': 'vlan',
'segmentation_id': 100}]})
self.driver._trunk_plugin.add_subports(
self._ctx, trunk2['trunk']['id'],
{'sub_ports': [{'port_id': sp2['id'],
'segmentation_type': 'vlan',
'segmentation_id': 100}]})
self.create_port_pair(ingress=sp1['id'], egress=sp2['id'],
expected_res_status=201)
class TestPortPairGroup(TestAIMServiceFunctionChainingBase):
def test_ppg_validation(self):
# Correct creation
net1 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
net2 = self._make_network(self.fmt, 'net2', True)
self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
# Service 1
p11 = self._make_port(self.fmt, net1['network']['id'])['port']
self._bind_port_to_host(p11['id'], 'h1')
p12 = self._make_port(self.fmt, net2['network']['id'])['port']
self._bind_port_to_host(p12['id'], 'h1')
pp1 = self.create_port_pair(ingress=p11['id'], egress=p12['id'],
expected_res_status=201)['port_pair']
# Service 2
p21 = self._make_port(self.fmt, net1['network']['id'])['port']
self._bind_port_to_host(p21['id'], 'h2')
p22 = self._make_port(self.fmt, net2['network']['id'])['port']
self._bind_port_to_host(p22['id'], 'h2')
pp2 = self.create_port_pair(ingress=p21['id'], egress=p22['id'],
expected_res_status=201)['port_pair']
# This goes through
ppg1 = self.create_port_pair_group(
port_pairs=[pp1['id'], pp2['id']],
expected_res_status=201)['port_pair_group']
# Use invalid pairs
net3 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net3, '192.168.0.1', '192.168.0.0/24')
p31 = self._make_port(self.fmt, net3['network']['id'])['port']
self._bind_port_to_host(p31['id'], 'h1')
pp3 = self.create_port_pair(ingress=p21['id'], egress=p31['id'],
expected_res_status=201)['port_pair']
self.delete_port_pair_group(ppg1['id'])
self.create_port_pair_group(port_pairs=[pp1['id'], pp3['id']],
expected_res_status=500)
# Works with only one PP
ppg2 = self.create_port_pair_group(
port_pairs=[pp3['id']],
expected_res_status=201)['port_pair_group']
# But update fails
self.update_port_pair_group(
ppg2['id'], port_pairs=[pp3['id'], pp1['id']],
expected_res_status=500)
class TestFlowClassifier(TestAIMServiceFunctionChainingBase):
def test_fc_validation(self):
# Correct classifier
net1 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
fc = self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24',
expected_res_status=201)['flow_classifier']
self.delete_flow_classifier(fc['id'], expected_res_status=204)
# Wrong FCs
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
destination_ip_prefix='192.168.1.0/24',
expected_res_status=400)
self.create_flow_classifier(
l7_parameters={'logical_source_network': net1['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net1['network']['id']},
source_ip_prefix='192.168.0.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': ''},
source_ip_prefix='192.168.0.0/24', expected_res_status=400)
self._delete_network(net2['network']['id'])
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24', expected_res_status=404)
class TestPortChain(TestAIMServiceFunctionChainingBase):
def setUp(self, *args, **kwargs):
super(TestPortChain, self).setUp()
self.src_svi = False
self.dst_svi = False
def _get_port_network(self, port_id):
port = self._show_port(port_id)
return self._show_network(port['network_id'])
def test_pc_validation(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=2)
ppg2 = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)['port_chain']
# Same classifier is not allowed.
self.create_port_chain(port_pair_groups=[ppg2['id']],
flow_classifiers=[fc['id']],
expected_res_status=409)
self.update_port_chain(
pc['id'], port_pair_groups=[ppg['id'], ppg2['id']],
expected_res_status=200)
def test_pc_validation_network_conflict(self):
nets = []
for i in range(3):
net = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net, '192.168.%s.1' % i,
'192.168.%s.0/24' % i)
nets.append(net['network']['id'])
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=2, leftn_id=nets[0],
rightn_id=nets[1])
ppg2 = self._create_simple_ppg(pairs=2, leftn_id=nets[0],
rightn_id=nets[1])
# Conflict with only one network
ppg3 = self._create_simple_ppg(pairs=2, leftn_id=nets[0],
rightn_id=nets[2])
self.create_port_chain(port_pair_groups=[ppg['id'], ppg2['id']],
flow_classifiers=[fc['id']],
expected_res_status=500)
self.create_port_chain(port_pair_groups=[ppg['id'], ppg3['id']],
flow_classifiers=[fc['id']],
expected_res_status=500)
def test_pc_mapping(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
def test_pc_mapping_two_flowcs(self):
pc = self._create_simple_port_chain(
flowcs=2, ppgs=3, ppg_args=[{'pairs': 1}, {'pairs': 2},
{'pairs': 3}])
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
def test_pc_mapping_no_flowcs(self):
pc = self._create_simple_port_chain(
flowcs=0, ppgs=3, ppg_args=[{'pairs': 1}, {'pairs': 2},
{'pairs': 3}])
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
def test_ppg_update(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)['port_chain']
pps = ppg['port_pairs']
# remove one pp
ppg = self.update_port_pair_group(
ppg['id'], port_pairs=[pps[0]])['port_pair_group']
self._verify_pc_mapping(pc)
# Replace pp
ppg = self.update_port_pair_group(
ppg['id'], port_pairs=[pps[1]])['port_pair_group']
self._verify_pc_mapping(pc)
# Add pp
ppg = self.update_port_pair_group(
ppg['id'], port_pairs=pps)['port_pair_group']
self._verify_pc_mapping(pc)
def test_flowc_update(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=1)
self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)
res = self._delete_network(
fc['l7_parameters']['logical_source_network'])
self.assertEqual(400, res.status_int)
res = self._delete_network(
fc['l7_parameters']['logical_destination_network'])
self.assertEqual(400, res.status_int)
def test_vrf_update(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=2)
self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)
self.aim_mgr.create(
self._aim_context, aim_res.EndpointGroup(
tenant_name='new', app_profile_name='new', name='new'))
net_db = self._plugin._get_network(
self._ctx, fc['l7_parameters']['logical_source_network'])
self.assertRaises(c_exc.CallbackFailure,
self.aim_mech._set_network_vrf_and_notify,
self._ctx, net_db.aim_mapping,
aim_res.VRF(tenant_name='new', name='new'))
net_db = self._plugin._get_network(
self._ctx, fc['l7_parameters']['logical_destination_network'])
self.assertRaises(c_exc.CallbackFailure,
self.aim_mech._set_network_vrf_and_notify,
self._ctx, net_db.aim_mapping,
aim_res.VRF(tenant_name='new', name='new'))
# Also changing EPG affects PC if tenant changes
self.assertRaises(c_exc.CallbackFailure,
self.aim_mech._set_network_epg_and_notify,
self._ctx, net_db.aim_mapping,
aim_res.EndpointGroup(tenant_name='new',
app_profile_name='new',
name='new'))
# But it doesn't if tenant stays the same
self.aim_mgr.create(
self._aim_context, aim_res.EndpointGroup(
tenant_name=net_db.aim_mapping.epg_tenant_name,
app_profile_name='new', name='new'))
self.aim_mech._set_network_epg_and_notify(
self._ctx, net_db.aim_mapping, aim_res.EndpointGroup(
tenant_name=net_db.aim_mapping.epg_tenant_name,
app_profile_name='new', name='new'))
pp = self.show_port_pair(ppg['port_pairs'][0])['port_pair']
net = self._get_port_network(pp['ingress'])
net_db = self._plugin._get_network(self._ctx, net['id'])
# Modifying EPG in service nets has no effect
self.aim_mech._set_network_epg_and_notify(
self._ctx, net_db.aim_mapping,
aim_res.EndpointGroup(tenant_name='new', app_profile_name='new',
name='new'))
# But it fails when VRF is changed
self.assertRaises(c_exc.CallbackFailure,
self.aim_mech._set_network_vrf_and_notify,
self._ctx, net_db.aim_mapping,
aim_res.VRF(tenant_name='new', name='new'))
def test_pc_mapping_no_host_mapping(self):
ctx = self._aim_context
self.aim_mgr.delete_all(ctx, aim_infra.HostDomainMappingV2)
# Since one physdom exists, everything works just fine
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
# If I also delete the physdom, everything fails
self.aim_mgr.delete(ctx, self.physdom)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=500)
def test_pc_mapping_same_provider_diff_consumer(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
# New classifier with only one change in subnet
fc2 = self.create_flow_classifier(
l7_parameters={
'logical_source_network': fc[
'l7_parameters']['logical_source_network'],
'logical_destination_network': fc[
'l7_parameters']['logical_destination_network']},
source_ip_prefix='192.168.3.0/24',
destination_ip_prefix=fc['destination_ip_prefix'],
expected_res_status=201)['flow_classifier']
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc['id']],
expected_res_status=201)['port_chain']
pc = self.update_port_chain(pc['id'],
flow_classifiers=[fc['id'], fc2['id']],
expected_res_status=200)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
def test_pc_mapping_default_sub_dst(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
# New classifier with only one change in subnet
fc2 = self.create_flow_classifier(
l7_parameters={
'logical_source_network': fc[
'l7_parameters']['logical_source_network'],
'logical_destination_network': fc[
'l7_parameters']['logical_destination_network']},
source_ip_prefix=fc['source_ip_prefix'],
destination_ip_prefix='0.0.0.0/0',
expected_res_status=201)['flow_classifier']
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc2['id']],
expected_res_status=201)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
def test_pc_mapping_default_sub_src(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
# New classifier with only one change in subnet
fc2 = self.create_flow_classifier(
l7_parameters={
'logical_source_network': fc[
'l7_parameters']['logical_source_network'],
'logical_destination_network': fc[
'l7_parameters']['logical_destination_network']},
source_ip_prefix='0.0.0.0/0',
destination_ip_prefix=fc['destination_ip_prefix'],
expected_res_status=201)['flow_classifier']
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc2['id']],
expected_res_status=201)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
def test_pc_mapping_default_sub_both(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
# New classifier with only one change in subnet
fc2 = self.create_flow_classifier(
l7_parameters={
'logical_source_network': fc[
'l7_parameters']['logical_source_network'],
'logical_destination_network': fc[
'l7_parameters']['logical_destination_network']},
source_ip_prefix='0.0.0.0/0',
destination_ip_prefix='0.0.0.0/0',
expected_res_status=201)['flow_classifier']
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc2['id']],
expected_res_status=201)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
# Enable once fixed on the SVI side.
def _test_pc_mapping_default_sub_ipv6(self):
fc = self._create_simple_flowc(src_svi=self.src_svi,
dst_svi=self.dst_svi)
# New classifier with only one change in subnet
fc2 = self.create_flow_classifier(
l7_parameters={
'logical_source_network': fc[
'l7_parameters']['logical_source_network'],
'logical_destination_network': fc[
'l7_parameters']['logical_destination_network']},
source_ip_prefix='::/0', destination_ip_prefix='::/0',
expected_res_status=201, ethertype='IPv6')['flow_classifier']
ppg = self._create_simple_ppg(pairs=2)
pc = self.create_port_chain(port_pair_groups=[ppg['id']],
flow_classifiers=[fc2['id']],
expected_res_status=201)['port_chain']
self._verify_pc_mapping(pc)
self._verify_pc_delete(pc)
class TestPortChainSVI(TestPortChain):
def setUp(self, *args, **kwargs):
super(TestPortChainSVI, self).setUp()
self.src_svi = True
self.dst_svi = True
def test_vrf_update(self):
# TODO(ivar): VRF is not in mapping, can't be updated
pass

View File

@@ -80,6 +80,10 @@ gbpservice.neutron.servicechain.ncp_plumbers =
dummy_plumber = gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers.dummy_plumber:NoopPlumber
stitching_plumber = gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers.traffic_stitching_plumber:TrafficStitchingPlumber
admin_owned_resources_apic_plumber = gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers.admin_owned_resources_apic_tscp:AdminOwnedResourcesApicTSCP
networking_sfc.sfc.drivers =
aim = gbpservice.neutron.services.sfc.aim.sfc_driver:SfcAIMDriver
networking_sfc.flowclassifier.drivers =
aim = gbpservice.neutron.services.sfc.aim.flowc_driver:FlowclassifierAIMDriver
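
# Illustrative operator configuration (lives in the Neutron server config,
# not in this file) to activate the drivers registered above, mirroring
# what the UTs set via set_override:
#
#   [sfc]
#   drivers = aim
#
#   [flowclassifier]
#   drivers = aim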
[build_sphinx]
source-dir = doc/source

View File

@@ -13,6 +13,7 @@
-e git+https://git.openstack.org/openstack/neutron-vpnaas@newton-eol#egg=neutron-vpnaas
-e git+https://git.openstack.org/openstack/neutron-lbaas@newton-eol#egg=neutron-lbaas
-e git+https://git.openstack.org/openstack/neutron-fwaas@newton-eol#egg=neutron-fwaas
-e git+https://git.openstack.org/openstack/networking-sfc@newton-eol#egg=networking-sfc
hacking<0.12,>=0.11.0 # Apache-2.0
cliff>=1.15.0 # Apache-2.0