New APIC mechanism and extension drivers

This is a very preliminary version of a new APIC mechanism driver
utilizing the ACI Integration Module (AIM) library concurrently being
developed. A corresponding extension driver exposes details regarding
the mapping of the Neutron resources to APIC. These drivers require
the Ml2Plus extended driver APIs.

See the apic-aim-ml2-driver devref for implementation details and for
devstack configuration instructions.

Change-Id: I82df32f0880d6a0d53b305f6c6391fcbea049d1b
This commit is contained in:
Robert Kukura 2016-03-23 13:21:49 -04:00
parent 696005faae
commit 6e307e0a38
21 changed files with 1471 additions and 19 deletions

49
devstack/lib/apic_aim Normal file
View File

@ -0,0 +1,49 @@
# Install everything needed by the apic_aim mechanism driver: the
# legacy apic_ml2 driver package, the ACI Integration Module (AIM)
# library, and the OpFlex agent.
function install_apic_aim {
    echo_summary "Installing apic_aim"
    install_apic_ml2
    install_aim
    install_opflex
}
# Configure Neutron to use the ml2plus core plugin and populate the
# [apic_aim_auth] section with keystone v3 admin credentials that the
# driver uses to look up project names, then initialize the AIM DB.
function configure_apic_aim {
    echo_summary "Configuring apic_aim"
    # devstack/lib/neutron_plugins/ml2 does not allow overriding
    # Q_PLUGIN_CLASS in override_defaults, so do it here instead
    iniset $NEUTRON_CONF DEFAULT core_plugin ml2plus
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth auth_plugin v3password
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth auth_url $KEYSTONE_SERVICE_URI_V3
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth username admin
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth password $ADMIN_PASSWORD
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth user_domain_name default
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth project_domain_name default
    iniset /$Q_PLUGIN_CONF_FILE apic_aim_auth project_name admin
    init_aim
}
# Clone and install the AIM library from source. test-requirements.txt
# is temporarily renamed so setup_develop does not pull in AIM's test
# dependencies, and restored afterwards so the checkout stays clean.
function install_aim {
    git_clone $AIM_REPO $AIM_DIR $AIM_BRANCH
    mv $AIM_DIR/test-requirements.txt $AIM_DIR/_test-requirements.txt
    setup_develop $AIM_DIR
    mv $AIM_DIR/_test-requirements.txt $AIM_DIR/test-requirements.txt
}
# Create/upgrade the AIM database tables, reusing neutron's DB
# connection configuration from $NEUTRON_CONF.
function init_aim {
    aim -c $NEUTRON_CONF db-migration upgrade
}
# Clone and install the OpFlex agent from source. As with install_aim,
# test-requirements.txt is hidden from setup_develop and then restored.
# An empty setup.cfg is created because the repo does not ship one and
# setup_develop expects it to exist.
function install_opflex {
    git_clone $OPFLEX_REPO $OPFLEX_DIR $OPFLEX_BRANCH
    mv $OPFLEX_DIR/test-requirements.txt $OPFLEX_DIR/_test-requirements.txt
    touch $OPFLEX_DIR/setup.cfg
    setup_develop $OPFLEX_DIR
    mv $OPFLEX_DIR/_test-requirements.txt $OPFLEX_DIR/test-requirements.txt
}
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:

View File

@ -26,6 +26,8 @@ AIM_REPO=http://github.com/noironetworks/aci-integration-module.git
AIM_DIR=$DEST/aim
APICML2_REPO=http://github.com/noironetworks/apic-ml2-driver.git
APICML2_DIR=$DEST/apic_ml2
OPFLEX_REPO=http://github.com/noironetworks/python-opflex-agent.git
OPFLEX_DIR=$DEST/opflexagent
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@ -81,17 +83,6 @@ function install_gbpui {
mv $GBPUI_DIR/_test-requirements.txt $GBPUI_DIR/test-requirements.txt
}
function install_aim {
git_clone $AIM_REPO $AIM_DIR $AIM_BRANCH
mv $AIM_DIR/test-requirements.txt $AIM_DIR/_test-requirements.txt
setup_develop $AIM_DIR
mv $AIM_DIR/_test-requirements.txt $AIM_DIR/test-requirements.txt
}
function init_aim {
aim -c $NEUTRON_CONF db-migration upgrade
}
function install_apic_ml2 {
git_clone $APICML2_REPO $APICML2_DIR $APICML2_BRANCH
mv $APICML2_DIR/test-requirements.txt $APICML2_DIR/_test-requirements.txt

View File

@ -1 +1,13 @@
NEUTRON_CREATE_INITIAL_NETWORKS="False"
ENABLE_APIC_AIM=${ENABLE_APIC_AIM:-False}
if [[ $ENABLE_APIC_AIM = True ]]; then
echo_summary "Overriding defaults for apic_aim"
Q_PLUGIN=${Q_PLUGIN:-ml2}
Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-opflex}
Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,vlan,opflex}
Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-apic_aim}
Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-apic_aim,port_security}
fi

View File

@ -49,6 +49,7 @@ if is_service_enabled group-policy; then
echo_summary "Preparing $GBP"
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing $GBP"
[[ $ENABLE_APIC_AIM = True ]] && install_apic_aim
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Installing $NFP"
[[ $DISABLE_BUILD_IMAGE = False ]] && prepare_nfp_image_builder
@ -60,9 +61,8 @@ if is_service_enabled group-policy; then
gbp_configure_neutron
[[ $ENABLE_NFP = True ]] && echo_summary "Configuring $NFP"
[[ $ENABLE_NFP = True ]] && nfp_configure_neutron
# install_apic_ml2
# install_aim
# init_aim
# REVISIT move installs to install phase?
# install_apic_ml2
install_gbpclient
install_gbpservice
[[ $ENABLE_NFP = True ]] && install_nfpgbpservice
@ -70,8 +70,9 @@ if is_service_enabled group-policy; then
[[ $ENABLE_NFP = True ]] && init_nfpgbpservice
install_gbpheat
install_gbpui
[[ $ENABLE_APIC_AIM = True ]] && configure_apic_aim
stop_apache_server
start_apache_server
start_apache_server
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
echo_summary "Initializing $GBP"
if [[ $ENABLE_NFP = True ]]; then

View File

@ -1,5 +1,8 @@
# Make sure the plugin name in local.conf is "gbp", as in: enable_plugin gbp <remote> <branch>
source $DEST/gbp/devstack/lib/gbp
[[ $ENABLE_APIC_AIM = True ]] && source $DEST/gbp/devstack/lib/apic_aim
ENABLE_NFP=${ENABLE_NFP:-False}
[[ $ENABLE_NFP = True ]] && source $DEST/gbp/devstack/lib/nfp
[[ $ENABLE_NFP = True ]] && DISABLE_BUILD_IMAGE=${DISABLE_BUILD_IMAGE:-False}
@ -21,14 +24,20 @@ GBPHEAT_REPO=${GBPHEAT_REPO:-${GIT_BASE}/openstack/group-based-policy-automation
GBPHEAT_BRANCH=${GBPHEAT_BRANCH:-master}
AIM_BRANCH=${AIM_BRANCH:-master}
APICML2_BRANCH=${APICML2_BRANCH:-master}
OPFLEX_BRANCH=${OPFLEX_BRANCH:-master}
# Enable necessary services, including group-policy (and disable others)
disable_service n-net
enable_service n-novnc
enable_service q-svc
enable_service q-agt
if [[ $ENABLE_APIC_AIM = True ]]; then
disable_service q-agt
disable_service q-l3
else
enable_service q-agt
enable_service q-l3
fi
enable_service q-dhcp
enable_service q-l3
enable_service q-fwaas
enable_service q-lbaas
enable_service q-meta

View File

@ -0,0 +1,91 @@
..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
APIC-AIM ML2 Driver
===================
The APIC-AIM ML2 mechanism driver and associated extension driver
utilize the ACI Integration Module (AIM) library to provide improved
integration between Neutron and the Cisco APIC. The most significant
advantage of the APIC-AIM ML2 drivers over the previous APIC ML2
drivers is that they are intended to coexist with the AIM GBP policy
driver, providing full simultaneous support for both Neutron and GBP
APIs within the same OpenStack deployment, including sharing of
resources between Neutron and GBP workflows where appropriate.
Additionally, the AIM-based mechanism and policy driver architecture
is completely transactional, and thus provides improved robustness,
performance, and scalability. A set of replicated AIM daemons is
responsible for continually maintaining consistency between the AIM DB
state specified by the drivers and the APIC state.
ML2Plus Plugin
--------------
The ML2Plus core plugin extends the ML2 plugin with several driver API
features that are needed for APIC AIM support. An extended
MechanismDriver abstract base class adds an ensure_tenant() method
that is called before any transaction creating a new resource, and
(soon) adds precommit and postcommit calls for operations on
additional resources such as address scope. An extended
ExtensionDriver base class will support extending those additional
resources.
ML2 configuration is unchanged, and compatibility is maintained with
all existing ML2 drivers.
APIC-AIM Mechanism Driver
-------------------------
The apic-aim ML2 mechanism driver maps Neutron resources to the APIC
resource configurations that provide the required Neutron networking
semantics. Currently, the following Neutron -> AIM mappings are
implemented:
tenant -> Tenant, ApplicationProfile
network -> BridgeDomain, default EndpointGroup
subnet -> Subnet
Neutron ports are realized as Endpoints within an APIC
EndpointGroup. A port created using Neutron APIs belongs to the
network's default EndpointGroup. A port created as a GBP PolicyTarget
does not use its PolicyTargetGroup's L2Policy's network's default
EndpointGroup, but instead belongs to an APIC EndpointGroup mapped
from its PolicyTargetGroup.
Additional mappings that are under development include:
address scope -> VRF
router -> contract rules
Port binding for the OpFlex L2 agent and support for the
get_gbp_details RPC are implemented. DVS port binding and other RPCs
remain to be implemented.
APIC-AIM Extension Driver
-------------------------
The apic-aim ML2 extension driver provides administrators with read
access to the distinguished names of the set of APIC resources to
which each Neutron resource is mapped, as well as to APIC-specific
status and AIM daemon synchronization status for those resources.
The extension driver may eventually also allow DNs of existing APIC
resources to be specified when creating Neutron resources.
DevStack Support
----------------
The ML2Plus core plugin and APIC-AIM mechanism and extension drivers
can be configured by including the following in local.conf when
running devstack::
enable_plugin gbp https://git.openstack.org/openstack/group-based-policy master
ENABLE_APIC_AIM=True
Note that the GBP devstack plugin installs the python-opflex-agent
repo, but does not yet configure or run the OpFlex L2 agent.

View File

@ -46,6 +46,7 @@ GBP Design
shared-resources
traffic-stitching-plumber-model
traffic-stitching-plumber-placement-type
apic-aim-ml2-driver
Module Reference
----------------

View File

@ -41,6 +41,8 @@
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"get_network:apic:distinguished_names": "rule:admin_only",
"get_network:apic:synchronization_state": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",

View File

@ -0,0 +1,133 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from neutron._i18n import _LI
# Module-level logger; assigned in APICNameMapper.__init__ from the
# caller-supplied log module.
LOG = None

# Resource types for which APIC names are mapped and persisted.
NAME_TYPE_TENANT = 'tenant'
NAME_TYPE_NETWORK = 'network'
NAME_TYPE_POLICY_TARGET_GROUP = 'policy_target_group'

# Maximum length of a name accepted by APIC.
MAX_APIC_NAME_LENGTH = 46


# TODO(rkukura): This name mapper is copied from the apicapi repo,
# and modified to pass in resource names rather than calling the core
# plugin to get them, and to use the existing DB session. We need to
# decide whether to make these changes in apicapi (maybe on a branch),
# move this to some other repo, or keep it here. The changes are not
# backwards compatible. The implementation should also be cleaned up
# and simplified. For example, sessions should be passed in place of
# contexts, and the core plugin calls eliminated.
def truncate(string, max_length):
    """Return string truncated to at most max_length characters.

    A non-positive max_length yields the empty string. The explicit
    guard matters because a negative value used directly as a slice
    bound would count from the end of the string; the original's
    extra length comparison was redundant, since slicing already
    returns the whole string when max_length >= len(string).
    """
    return string[:max_length] if max_length > 0 else ''
class APICNameMapper(object):
    """Maps Neutron/GBP resource IDs and names to unique APIC names.

    Mappings are persisted through the supplied db object so a given
    resource always maps to the same APIC name. Uniqueness is achieved
    by appending characters of the resource UUID to the (truncated)
    resource name, growing the suffix on collision.
    """

    def __init__(self, db, log):
        # db must provide get_apic_name, add_apic_name,
        # delete_apic_name and (optionally) get_filtered_apic_names.
        self.db = db
        # Minimum number of UUID characters appended as a suffix.
        self.min_suffix = 5
        global LOG
        # The log module is injected so this module does not depend on
        # a particular logging setup.
        LOG = log.getLogger(__name__)

    def mapper(name_type):
        """Wrapper to land all the common operations between mappers.

        Decorator factory for the per-resource-type methods below. The
        wrapped function computes the "natural" name of the resource;
        this wrapper adds DB caching, UUID-suffix uniquification,
        truncation to MAX_APIC_NAME_LENGTH, and whitespace removal.
        """
        def wrap(func):
            def inner(inst, session, resource_id, resource_name=None):
                # Return a previously persisted mapping if one exists.
                saved_name = inst.db.get_apic_name(session,
                                                   resource_id,
                                                   name_type)
                if saved_name:
                    result = saved_name[0]
                    return result
                name = ''
                try:
                    name = func(inst, session, resource_id, resource_name)
                except Exception as e:
                    # NOTE(review): LOG.warn is a deprecated alias of
                    # LOG.warning, and e.message does not exist on
                    # Python 3 exceptions -- confirm py2-only usage or
                    # switch to str(e).
                    LOG.warn(("Exception in looking up name %s"), name_type)
                    LOG.error(e.message)
                # Collapse runs of '-' in the UUID, then keep the first
                # min_suffix characters as the uniquifying suffix.
                purged_id = re.sub(r"-+", "-", resource_id)
                result = purged_id[:inst.min_suffix]
                if name:
                    name = re.sub(r"-+", "-", name)
                    # Keep as many uuid chars as possible
                    id_suffix = "_" + result
                    max_name_length = MAX_APIC_NAME_LENGTH - len(id_suffix)
                    result = truncate(name, max_name_length) + id_suffix
                    result = truncate(result, MAX_APIC_NAME_LENGTH)
                    # Remove forbidden whitespaces
                    result = result.replace(' ', '')
                    # Append more UUID characters until the name is
                    # unique among already-mapped names of this type.
                    result = inst._grow_id_if_needed(
                        session, purged_id, name_type, result,
                        start=inst.min_suffix)
                else:
                    # No natural name available: use the purged UUID.
                    result = purged_id
                inst.db.add_apic_name(session, resource_id,
                                      name_type, result)
                return result
            return inner
        return wrap

    def _grow_id_if_needed(self, session, resource_id, name_type,
                           current_result, start=0):
        """Extend current_result with UUID chars until it is unique.

        start is the index into resource_id of the first character not
        already used in the suffix.
        """
        result = current_result
        if result.endswith('_'):
            result = result[:-1]
        try:
            x = 0
            while True:
                if self.db.get_filtered_apic_names(session,
                                                   neutron_type=name_type,
                                                   apic_name=result):
                    if x == 0 and start == 0:
                        result += '_'
                    # This name overlaps, add more ID characters
                    result += resource_id[start + x]
                    x += 1
                else:
                    break
        except AttributeError:
            # Older DB API without uniqueness query: accept the name
            # as-is rather than failing.
            LOG.info(_LI("Current DB API doesn't support "
                         "get_filtered_apic_names."))
        except IndexError:
            # Exhausted the UUID; return the best name found so far.
            LOG.debug("Ran out of ID characters.")
        return result

    @mapper(NAME_TYPE_TENANT)
    def tenant(self, session, tenant_id, tenant_name=None):
        # Natural name of a tenant is the keystone project name.
        return tenant_name

    @mapper(NAME_TYPE_NETWORK)
    def network(self, session, network_id, network_name=None):
        # Natural name of a network is its Neutron name.
        return network_name

    @mapper(NAME_TYPE_POLICY_TARGET_GROUP)
    def policy_target_group(self, session, policy_target_group_id,
                            policy_target_group_name=None):
        # Natural name of a PTG is its GBP name.
        return policy_target_group_name

    def delete_apic_name(self, session, object_id):
        """Remove the persisted mapping for the given resource ID."""
        self.db.delete_apic_name(session, object_id)

View File

@ -0,0 +1,82 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import auth as ksc_auth
from keystoneclient import session as ksc_session
from keystoneclient.v3 import client as ksc_client
from neutron._i18n import _LW
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
# REVISIT(rkukura): We use keystone to get the name of the keystone
# project owning each neutron resource, which by default, requires
# admin. If we keep this, we should probably move it to a separate
# config module. But we should also investigate whether admin is even
# needed, or if neutron's credentials could somehow be used.

# Config section from which keystone session/auth options are read.
AUTH_GROUP = 'apic_aim_auth'

# Register the standard keystoneclient session and auth options under
# the [apic_aim_auth] group so deployers can set them in neutron.conf.
ksc_session.Session.register_conf_options(cfg.CONF, AUTH_GROUP)
ksc_auth.register_conf_options(cfg.CONF, AUTH_GROUP)
class ProjectNameCache(object):
    """Cache of Keystone project ID to project name mappings."""

    def __init__(self):
        # Maps project_id -> project name.
        self.project_names = {}
        # Lazily-constructed keystone client; None until first needed.
        self.keystone = None

    def ensure_project(self, project_id):
        """Ensure cache contains mapping for project.

        :param project_id: ID of the project

        Ensure that the cache contains a mapping for the project
        identified by project_id. If it is not, Keystone will be
        queried for the current list of projects, and any new mappings
        will be added to the cache. This method should never be called
        inside a transaction with a project_id not already in the
        cache.
        """
        if project_id not in self.project_names:
            if self.keystone is None:
                self.keystone = self._make_keystone_client()
            LOG.debug("Calling project API")
            projects = self.keystone.projects.list()
            # NOTE: use lazy logging arguments instead of eager '%'
            # interpolation so the repr is only built when DEBUG is on.
            LOG.debug("Received projects: %s", projects)
            for project in projects:
                self.project_names[project.id] = project.name

    def _make_keystone_client(self):
        """Build a keystone v3 client from [apic_aim_auth] options."""
        LOG.debug("Getting keystone client")
        auth = ksc_auth.load_from_conf_options(cfg.CONF, AUTH_GROUP)
        LOG.debug("Got auth: %s", auth)
        if not auth:
            # A session with auth=None will likely fail on the first
            # keystone call; surface the misconfiguration early.
            LOG.warning(_LW('No auth_plugin configured in %s'),
                        AUTH_GROUP)
        session = ksc_session.Session.load_from_conf_options(
            cfg.CONF, AUTH_GROUP, auth=auth)
        LOG.debug("Got session: %s", session)
        client = ksc_client.Client(session=session)
        LOG.debug("Got client: %s", client)
        return client

    def get_project_name(self, project_id):
        """Get name of project from cache.

        :param project_id: ID of the project

        Get the name of the project identified by project_id from the
        cache. If the cache contains project_id, the project's name is
        returned. If not, None is returned.
        """
        return self.project_names.get(project_id)

View File

@ -0,0 +1,57 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron._i18n import _LI
from neutron.api import extensions
from neutron import manager as n_manager
from neutron.plugins.ml2 import driver_api
from oslo_log import log
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
extensions as extensions_pkg)
LOG = log.getLogger(__name__)
class ApicExtensionDriver(driver_api.ExtensionDriver):
    """ML2 extension driver exposing APIC mapping details.

    Delegates all dict extension work to the apic_aim mechanism
    driver instance, which owns the Neutron-to-AIM mapping state.
    """

    def __init__(self):
        LOG.info(_LI("APIC AIM ED __init__"))
        self._mechanism_driver = None

    def initialize(self):
        LOG.info(_LI("APIC AIM ED initializing"))
        extensions.append_api_extensions_path(extensions_pkg.__path__)

    @property
    def _md(self):
        # Locate and memoize the apic_aim mechanism driver instance.
        # REVISIT(rkukura): It might be safer to search the MDs by
        # class rather than index by name, or to use a class
        # variable to find the instance.
        if not self._mechanism_driver:
            core_plugin = n_manager.NeutronManager.get_plugin()
            driver_entry = core_plugin.mechanism_manager.mech_drivers[
                'apic_aim']
            self._mechanism_driver = driver_entry.obj
        return self._mechanism_driver

    @property
    def extension_alias(self):
        return "cisco-apic"

    def extend_network_dict(self, session, base_model, result):
        self._md.extend_network_dict(session, base_model, result)

    def extend_subnet_dict(self, session, base_model, result):
        self._md.extend_subnet_dict(session, base_model, result)

View File

@ -0,0 +1,69 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.extensions import address_scope
ALIAS = 'cisco-apic'

# Attribute names this extension adds to network, subnet and address
# scope resources.
DIST_NAMES = 'apic:distinguished_names'
SYNC_STATE = 'apic:synchronization_state'

# Keys used within the DIST_NAMES dict to identify which kind of APIC
# resource each distinguished name refers to.
BD = 'BridgeDomain'
CTX = 'Context'
EPG = 'EndpointGroup'
SUBNET = 'Subnet'

# Possible values of the SYNC_STATE attribute.
SYNC_SYNCED = 'synced'
SYNC_PENDING = 'pending'
SYNC_FAILED = 'failed'

# Both attributes are read-only via the API; network access is
# further restricted to admin via policy.json rules.
APIC_ATTRIBUTES = {
    DIST_NAMES: {'allow_post': False, 'allow_put': False, 'is_visible': True},
    SYNC_STATE: {'allow_post': False, 'allow_put': False, 'is_visible': True}
}

# The same attribute set is attached to all three resource types.
EXTENDED_ATTRIBUTES_2_0 = {
    attributes.NETWORKS: APIC_ATTRIBUTES,
    attributes.SUBNETS: APIC_ATTRIBUTES,
    address_scope.ADDRESS_SCOPES: APIC_ATTRIBUTES
}
class Cisco_apic(extensions.ExtensionDescriptor):
    """API extension exposing the Neutron-to-APIC resource mapping."""

    @classmethod
    def get_name(cls):
        return "Cisco APIC"

    @classmethod
    def get_alias(cls):
        return ALIAS

    @classmethod
    def get_description(cls):
        return ("Extension exposing mapping of Neutron resources to Cisco "
                "APIC constructs")

    @classmethod
    def get_updated(cls):
        return "2016-03-31T12:00:00-00:00"

    def get_extended_resources(self, version):
        # The extended attributes only apply to API version 2.0.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}

View File

@ -0,0 +1,589 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aim import aim_manager
from aim.api import resource as aim_resource
from aim import context as aim_context
from neutron._i18n import _LE
from neutron._i18n import _LI
from neutron._i18n import _LW
from neutron.agent.linux import dhcp
from neutron.common import constants as n_constants
from neutron.common import rpc as n_rpc
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import rpc as ml2_rpc
from opflexagent import constants as ofcst
from opflexagent import rpc as o_rpc
from oslo_log import log
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import cache
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim.extensions import (
cisco_apic)
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import model
LOG = log.getLogger(__name__)

# Name of the single APIC ApplicationProfile created per mapped
# tenant; all EndpointGroups created by this driver live in it.
AP_NAME = 'NeutronAP'

# Identifiers for the (not yet implemented) DVS binding path.
AGENT_TYPE_DVS = 'DVS agent'
VIF_TYPE_DVS = 'dvs'

# NOTE(review): consumed by RPC code below this chunk; presumably
# marks DHCP/load-balancer ports as promiscuous endpoints -- confirm
# against get_gbp_details.
PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP,
                     n_constants.DEVICE_OWNER_LOADBALANCER]
class ApicMechanismDriver(api_plus.MechanismDriver):
def __init__(self):
    # Real setup happens in initialize(); this only logs that the
    # driver object was instantiated.
    LOG.info(_LI("APIC AIM MD __init__"))
def initialize(self):
    """Set up name mapper, AIM manager, caches and RPC listeners.

    Called once by ML2 after all drivers are loaded.
    """
    LOG.info(_LI("APIC AIM MD initializing"))
    self.project_name_cache = cache.ProjectNameCache()
    self.db = model.DbModel()
    self.name_mapper = apic_mapper.APICNameMapper(self.db, log)
    self.aim = aim_manager.AimManager()
    # REVISIT(rkukura): Read from config or possibly from AIM?
    self.enable_dhcp_opt = True
    self.enable_metadata_opt = True
    self._setup_opflex_rpc_listeners()
def _setup_opflex_rpc_listeners(self):
    # Serve the OpFlex agent RPC API on the opflex topic, with this
    # driver instance acting as the callback handler (it implements
    # get_gbp_details below).
    self.opflex_endpoints = [o_rpc.GBPServerRpcCallback(self)]
    self.opflex_topic = o_rpc.TOPIC_OPFLEX
    self.opflex_conn = n_rpc.create_connection(new=True)
    self.opflex_conn.create_consumer(
        self.opflex_topic, self.opflex_endpoints, fanout=False)
    self.opflex_conn.consume_in_threads()
def ensure_tenant(self, plugin_context, tenant_id):
    """Ensure AIM Tenant and ApplicationProfile exist for a tenant.

    Called by the Ml2Plus plugin before any transaction that creates
    a resource owned by tenant_id. Caches the keystone project name
    first (outside any transaction), then creates the AIM Tenant and
    the driver's ApplicationProfile if they are missing.
    """
    LOG.info(_LI("APIC AIM MD ensuring tenant_id: %s"), tenant_id)
    self.project_name_cache.ensure_project(tenant_id)

    # TODO(rkukura): Move the following to precommit methods so
    # AIM tenants and application profiles are created whenever
    # needed.
    session = plugin_context.session
    with session.begin(subtransactions=True):
        project_name = self.project_name_cache.get_project_name(tenant_id)
        tenant_name = self.name_mapper.tenant(session, tenant_id,
                                              project_name)
        LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                 {'id': tenant_id, 'apic_name': tenant_name})

        aim_ctx = aim_context.AimContext(session)

        tenant = aim_resource.Tenant(name=tenant_name)
        if not self.aim.get(aim_ctx, tenant):
            self.aim.create(aim_ctx, tenant)
        ap = aim_resource.ApplicationProfile(tenant_name=tenant_name,
                                             name=AP_NAME)
        if not self.aim.get(aim_ctx, ap):
            self.aim.create(aim_ctx, ap)
def create_network_precommit(self, context):
    """Map a new network to an AIM BridgeDomain and default EPG.

    Runs inside the network-creation transaction; both AIM resources
    are named from the mapped network name.
    """
    LOG.info(_LI("APIC AIM MD creating network: %s"), context.current)

    session = context._plugin_context.session

    tenant_id = context.current['tenant_id']
    tenant_name = self.name_mapper.tenant(session, tenant_id)
    LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
             {'id': tenant_id, 'apic_name': tenant_name})

    id = context.current['id']
    name = context.current['name']
    bd_name = self.name_mapper.network(session, id, name)
    LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                 "%(apic_name)s"),
             {'id': id, 'name': name, 'apic_name': bd_name})

    aim_ctx = aim_context.AimContext(session)

    bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
                                   name=bd_name)
    self.aim.create(aim_ctx, bd)
    # The default EPG shares the BridgeDomain's mapped name.
    epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
                                     app_profile_name=AP_NAME,
                                     name=bd_name)
    self.aim.create(aim_ctx, epg)
def delete_network_precommit(self, context):
    """Delete the AIM EPG and BridgeDomain mapped from a network.

    Also removes the persisted network name mapping; the EPG is
    deleted before the BridgeDomain it references.
    """
    LOG.info(_LI("APIC AIM MD deleting network: %s"), context.current)

    session = context._plugin_context.session

    tenant_id = context.current['tenant_id']
    tenant_name = self.name_mapper.tenant(session, tenant_id)
    LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
             {'id': tenant_id, 'apic_name': tenant_name})

    id = context.current['id']
    bd_name = self.name_mapper.network(session, id)
    LOG.info(_LI("Mapped network_id %(id)s to %(apic_name)s"),
             {'id': id, 'apic_name': bd_name})

    aim_ctx = aim_context.AimContext(session)

    epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
                                     app_profile_name=AP_NAME,
                                     name=bd_name)
    self.aim.delete(aim_ctx, epg)
    bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
                                   name=bd_name)
    self.aim.delete(aim_ctx, bd)

    self.name_mapper.delete_apic_name(session, id)
def extend_network_dict(self, session, base_model, result):
    """Add apic:distinguished_names and apic:synchronization_state.

    Called via the extension driver whenever a network dict is
    built; reports the DNs and combined sync status of the mapped
    BridgeDomain and EndpointGroup.
    """
    LOG.info(_LI("APIC AIM MD extending dict for network: %s"), result)

    sync_state = cisco_apic.SYNC_SYNCED

    tenant_id = result['tenant_id']
    tenant_name = self.name_mapper.tenant(session, tenant_id)
    LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
             {'id': tenant_id, 'apic_name': tenant_name})

    id = result['id']
    name = result['name']
    bd_name = self.name_mapper.network(session, id, name)
    LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                 "%(apic_name)s"),
             {'id': id, 'name': name, 'apic_name': bd_name})

    aim_ctx = aim_context.AimContext(session)

    bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
                                   name=bd_name)
    bd = self.aim.get(aim_ctx, bd)
    LOG.debug("got BD with DN: %s", bd.dn)
    epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
                                     app_profile_name=AP_NAME,
                                     name=bd_name)
    epg = self.aim.get(aim_ctx, epg)
    LOG.debug("got EPG with DN: %s", epg.dn)

    result[cisco_apic.DIST_NAMES] = {cisco_apic.BD: bd.dn,
                                     cisco_apic.EPG: epg.dn}

    bd_status = self.aim.get_status(aim_ctx, bd)
    # NOTE(review): sync_state is an immutable str, so these calls
    # cannot update it in place, and their return values are
    # discarded -- the reported state appears to always remain
    # 'synced'. Confirm whether these should read
    # "sync_state = self._merge_status(sync_state, ...)".
    self._merge_status(sync_state, bd_status)
    epg_status = self.aim.get_status(aim_ctx, epg)
    self._merge_status(sync_state, epg_status)
    result[cisco_apic.SYNC_STATE] = sync_state
def create_subnet_precommit(self, context):
    """Map a new subnet's gateway to an AIM Subnet on its BD.

    Subnets without a gateway IP are not mapped. Since ML2 does not
    re-extend the subnet dict after precommit, the extension
    attributes are written into context.current directly.
    """
    LOG.info(_LI("APIC AIM MD creating subnet: %s"), context.current)

    # REVISIT(rkukura): Do we need to do any of the
    # constraints/scope stuff?

    gateway_ip_mask = self._gateway_ip_mask(context.current)
    if gateway_ip_mask:
        session = context._plugin_context.session

        network_id = context.current['network_id']
        # REVISIT(rkukura): Should Ml2Plus extend SubnetContext
        # with network?
        network = (session.query(models_v2.Network).
                   filter_by(id=network_id).
                   one())

        tenant_id = network.tenant_id
        tenant_name = self.name_mapper.tenant(session, tenant_id)
        LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                 {'id': tenant_id, 'apic_name': tenant_name})

        network_name = network.name
        bd_name = self.name_mapper.network(session, network_id,
                                           network_name)
        LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                     "%(apic_name)s"),
                 {'id': network_id, 'name': network_name,
                  'apic_name': bd_name})

        aim_ctx = aim_context.AimContext(session)

        subnet = aim_resource.Subnet(tenant_name=tenant_name,
                                     bd_name=bd_name,
                                     gw_ip_mask=gateway_ip_mask)
        subnet = self.aim.create(aim_ctx, subnet)
        subnet_dn = subnet.dn
        subnet_status = self.aim.get_status(aim_ctx, subnet)
        sync_state = cisco_apic.SYNC_SYNCED
        # NOTE(review): return value discarded; a str cannot be
        # updated in place -- confirm whether this should be
        # "sync_state = self._merge_status(...)".
        self._merge_status(sync_state, subnet_status)

        # ML2 does not extend subnet dict after precommit.
        context.current[cisco_apic.DIST_NAMES] = {cisco_apic.SUBNET:
                                                  subnet_dn}
        context.current[cisco_apic.SYNC_STATE] = sync_state
def update_subnet_precommit(self, context):
    """Handle gateway IP changes by replacing the AIM Subnet.

    Only acts when gateway_ip changed: deletes the AIM Subnet mapped
    from the old gateway (if any), then creates one for the new
    gateway (if any) and records its DN and sync state.
    """
    LOG.info(_LI("APIC AIM MD updating subnet: %s"), context.current)

    if context.current['gateway_ip'] != context.original['gateway_ip']:
        session = context._plugin_context.session

        network_id = context.current['network_id']
        # REVISIT(rkukura): Should Ml2Plus extend SubnetContext
        # with network?
        network = (session.query(models_v2.Network).
                   filter_by(id=network_id).
                   one())

        tenant_id = network.tenant_id
        tenant_name = self.name_mapper.tenant(session, tenant_id)
        LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                 {'id': tenant_id, 'apic_name': tenant_name})

        network_name = network.name
        bd_name = self.name_mapper.network(session, network_id,
                                           network_name)
        LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                     "%(apic_name)s"),
                 {'id': network_id, 'name': network_name,
                  'apic_name': bd_name})

        aim_ctx = aim_context.AimContext(session)

        # Remove the AIM Subnet for the previous gateway, if one
        # was mapped.
        gateway_ip_mask = self._gateway_ip_mask(context.original)
        if gateway_ip_mask:
            subnet = aim_resource.Subnet(tenant_name=tenant_name,
                                         bd_name=bd_name,
                                         gw_ip_mask=gateway_ip_mask)
            self.aim.delete(aim_ctx, subnet)

        # Create the AIM Subnet for the new gateway, if any.
        gateway_ip_mask = self._gateway_ip_mask(context.current)
        if gateway_ip_mask:
            subnet = aim_resource.Subnet(tenant_name=tenant_name,
                                         bd_name=bd_name,
                                         gw_ip_mask=gateway_ip_mask)
            subnet = self.aim.create(aim_ctx, subnet)
            subnet_dn = subnet.dn
            subnet_status = self.aim.get_status(aim_ctx, subnet)
            sync_state = cisco_apic.SYNC_SYNCED
            # NOTE(review): return value discarded; a str cannot be
            # updated in place -- confirm intent (see
            # extend_network_dict).
            self._merge_status(sync_state, subnet_status)

            # ML2 does not extend subnet dict after precommit.
            context.current[cisco_apic.DIST_NAMES] = {cisco_apic.SUBNET:
                                                      subnet_dn}
            context.current[cisco_apic.SYNC_STATE] = sync_state
def delete_subnet_precommit(self, context):
    """Delete the AIM Subnet mapped from the subnet's gateway.

    No-op when the subnet has no gateway IP (nothing was mapped).
    """
    LOG.info(_LI("APIC AIM MD deleting subnet: %s"), context.current)

    gateway_ip_mask = self._gateway_ip_mask(context.current)
    if gateway_ip_mask:
        session = context._plugin_context.session

        network_id = context.current['network_id']
        # REVISIT(rkukura): Should Ml2Plus extend SubnetContext
        # with network?
        network = (session.query(models_v2.Network).
                   filter_by(id=network_id).
                   one())

        tenant_id = network.tenant_id
        tenant_name = self.name_mapper.tenant(session, tenant_id)
        LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                 {'id': tenant_id, 'apic_name': tenant_name})

        network_name = network.name
        bd_name = self.name_mapper.network(session, network_id,
                                           network_name)
        LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                     "%(apic_name)s"),
                 {'id': network_id, 'name': network_name,
                  'apic_name': bd_name})

        aim_ctx = aim_context.AimContext(session)

        subnet = aim_resource.Subnet(tenant_name=tenant_name,
                                     bd_name=bd_name,
                                     gw_ip_mask=gateway_ip_mask)
        self.aim.delete(aim_ctx, subnet)
def extend_subnet_dict(self, session, base_model, result):
    """Add APIC DN and sync-state attributes to a subnet dict.

    If the subnet has a gateway but the AIM Subnet is not found yet
    (precommit has not run), a placeholder DN string is reported.
    """
    LOG.info(_LI("APIC AIM MD extending dict for subnet: %s"), result)

    subnet_dn = None
    sync_state = cisco_apic.SYNC_SYNCED

    gateway_ip_mask = self._gateway_ip_mask(result)
    if gateway_ip_mask:
        network_id = result['network_id']
        network = (session.query(models_v2.Network).
                   filter_by(id=network_id).
                   one())

        tenant_id = network.tenant_id
        tenant_name = self.name_mapper.tenant(session, tenant_id)
        LOG.info(_LI("Mapped tenant_id %(id)s to %(apic_name)s"),
                 {'id': tenant_id, 'apic_name': tenant_name})

        network_name = network.name
        bd_name = self.name_mapper.network(session, network_id,
                                           network_name)
        LOG.info(_LI("Mapped network_id %(id)s with name %(name)s to "
                     "%(apic_name)s"),
                 {'id': network_id, 'name': network_name,
                  'apic_name': bd_name})

        aim_ctx = aim_context.AimContext(session)

        subnet = aim_resource.Subnet(tenant_name=tenant_name,
                                     bd_name=bd_name,
                                     gw_ip_mask=gateway_ip_mask)
        subnet = self.aim.get(aim_ctx, subnet)
        if subnet:
            LOG.debug("got Subnet with DN: %s", subnet.dn)
            subnet_dn = subnet.dn
            subnet_status = self.aim.get_status(aim_ctx, subnet)
            # NOTE(review): return value discarded; a str cannot be
            # updated in place -- confirm intent (see
            # extend_network_dict).
            self._merge_status(sync_state, subnet_status)
        else:
            # This should always get replaced with the real DN
            # during precommit.
            subnet_dn = "AIM Subnet not yet created"

    result[cisco_apic.DIST_NAMES] = {cisco_apic.SUBNET: subnet_dn}
    result[cisco_apic.SYNC_STATE] = sync_state
def bind_port(self, context):
    """Attempt to bind the port, preferring DVS for compute ports.

    Only normal VNICs are considered; compute-owned ports are first
    offered to a DVS agent, then binding falls back to the OpFlex OVS
    agent for all ports.
    """
    LOG.debug("Attempting to bind port %(port)s on network %(net)s",
              {'port': context.current['id'],
               'net': context.network.current['id']})

    # TODO(rkukura): Add support for baremetal hosts, SR-IOV and
    # other situations requiring dynamic segments.

    # Refuse anything but a normal VNIC.
    vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                    portbindings.VNIC_NORMAL)
    if vnic_type != portbindings.VNIC_NORMAL:
        LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                  vnic_type)
        return

    # Compute ports get a first chance at DVS binding.
    is_compute = context.current['device_owner'].startswith('compute:')
    if is_compute and self._agent_bind_port(context, AGENT_TYPE_DVS,
                                            self._dvs_bind_port):
        return

    # Otherwise (or if DVS declined), try the OpFlex agent.
    self._agent_bind_port(context, ofcst.AGENT_TYPE_OPFLEX_OVS,
                          self._opflex_bind_port)
def _agent_bind_port(self, context, agent_type, bind_strategy):
    """Try to bind using any live agent of agent_type on the port's host.

    :param context: the PortContext being bound
    :param agent_type: agent type string to look up on the host
    :param bind_strategy: callable(context, segment, agent) -> bool
    :returns: True if a segment was successfully bound, else False

    Bug fix: the original version returned None unconditionally, so
    bind_port's check of the DVS result always failed and it fell
    through to the OpFlex agent even after a successful DVS binding.
    """
    for agent in context.host_agents(agent_type):
        LOG.debug("Checking agent: %s", agent)
        if agent['alive']:
            for segment in context.segments_to_bind:
                if bind_strategy(context, segment, agent):
                    LOG.debug("Bound using segment: %s", segment)
                    return True
        else:
            LOG.warning(_LW("Refusing to bind port %(port)s to dead "
                            "agent: %(agent)s"),
                        {'port': context.current['id'], 'agent': agent})
    return False
def _opflex_bind_port(self, context, segment, agent):
    """Bind strategy for the OpFlex OVS agent.

    Binds opflex-type segments whose physical network the agent serves
    (an opflex_networks value of None means all), and 'local' segments.

    :returns: True if the segment was bound, else False

    Bug fix: the original version fell off the end after calling
    set_binding(), returning None, so _agent_bind_port treated a
    successful binding as a failure and never logged it.
    """
    network_type = segment[api.NETWORK_TYPE]
    if network_type == ofcst.TYPE_OPFLEX:
        opflex_mappings = agent['configurations'].get('opflex_networks')
        LOG.debug("Checking segment: %(segment)s "
                  "for physical network: %(mappings)s ",
                  {'segment': segment, 'mappings': opflex_mappings})
        if (opflex_mappings is not None and
                segment[api.PHYSICAL_NETWORK] not in opflex_mappings):
            return False
    elif network_type != 'local':
        return False

    context.set_binding(segment[api.ID],
                        portbindings.VIF_TYPE_OVS,
                        {portbindings.CAP_PORT_FILTER: False,
                         portbindings.OVS_HYBRID_PLUG: False})
    return True
def _dvs_bind_port(self, context, segment, agent):
    # Bind strategy stub for DVS agents: always reports failure until
    # DVS support is implemented, so bind_port falls through to OpFlex.
    # TODO(rkukura): Implement DVS port binding
    return False
# RPC Method
def get_gbp_details(self, context, **kwargs):
    """RPC entry point: return GBP endpoint details for a device.

    On any failure, logs the exception and returns a minimal
    {'device': <device>} dict instead of propagating the error.
    """
    LOG.debug("APIC AIM MD handling get_gbp_details for: %s", kwargs)
    device = kwargs.get('device')
    try:
        return self._get_gbp_details(context, kwargs)
    except Exception as e:
        LOG.error(_LE("An exception has occurred while retrieving device "
                      "gbp details for %s"), device)
        LOG.exception(e)
        return {'device': device}
def request_endpoint_details(self, context, **kwargs):
    """RPC entry point: return combined GBP and Neutron device details.

    :returns: dict with 'device', 'timestamp', 'request_id',
              'gbp_details' and 'neutron_details' keys, or None on
              any failure.

    Robustness fix: 'request' is resolved before the try block and
    defaulted to {}, so the error path can safely call
    request.get('device'); previously a missing/None 'request' made
    the except handler itself raise AttributeError instead of
    returning None as intended.
    """
    LOG.debug("APIC AIM MD handling get_endpoint_details for: %s", kwargs)
    request = kwargs.get('request') or {}
    try:
        result = {'device': request['device'],
                  'timestamp': request['timestamp'],
                  'request_id': request['request_id'],
                  'gbp_details': None,
                  'neutron_details': None}
        result['gbp_details'] = self._get_gbp_details(context, request)
        result['neutron_details'] = ml2_rpc.RpcCallbacks(
            None, None).get_device_details(context, **request)
        return result
    except Exception as e:
        LOG.error(_LE("An exception has occurred while requesting device "
                      "gbp details for %s"), request.get('device'))
        LOG.exception(e)
        return None
def _get_gbp_details(self, context, request):
    """Build the GBP endpoint-details dict for a bound port.

    :param context: neutron request context (provides the DB session)
    :param request: dict with at least 'device' and 'host' keys
    :returns: the details dict; {'device': <device>} when the port is
              not found; None when the port is bound to a different
              host than the requester.
    """
    device = request.get('device')
    host = request.get('host')

    core_plugin = manager.NeutronManager.get_plugin()
    port_id = core_plugin._device_to_port_id(context, device)
    port_context = core_plugin.get_bound_port_context(context, port_id,
                                                      host)
    if not port_context:
        LOG.warning(_LW("Device %(device)s requested by agent "
                        "%(agent_id)s not found in database"),
                    {'device': port_id,
                     'agent_id': request.get('agent_id')})
        return {'device': device}
    port = port_context.current

    # Only serve details to the host the port is actually bound to.
    # NOTE(review): this path returns None while the not-found path
    # above returns {'device': device} -- confirm callers handle both.
    if port[portbindings.HOST_ID] != host:
        LOG.warning(_LW("Device %(device)s requested by agent "
                        "%(agent_id)s not found bound for host %(host)s"),
                    {'device': port_id, 'host': host,
                     'agent_id': request.get('agent_id')})
        return

    session = context.session
    with session.begin(subtransactions=True):
        # REVISIT(rkukura): Should AIM resources be
        # validated/created here if necessary? Also need to avoid
        # creating any new name mappings without first getting
        # their resource names.

        # TODO(rkukura): For GBP, we need to use the EPG
        # associated with the port's PT's PTG. For now, we just use the
        # network's default EPG.

        # TODO(rkukura): Use common tenant for shared networks.

        # TODO(rkukura): Scope the tenant's AIM name.

        network = port_context.network.current
        epg_tenant_name = self.name_mapper.tenant(session,
                                                  network['tenant_id'])
        epg_name = self.name_mapper.network(session, network['id'], None)

        # Ports owned by infrastructure services get promiscuous mode.
        promiscuous_mode = port['device_owner'] in PROMISCUOUS_TYPES

        details = {'allowed_address_pairs': port['allowed_address_pairs'],
                   'app_profile_name': AP_NAME,
                   'device': device,
                   'enable_dhcp_optimization': self.enable_dhcp_opt,
                   'enable_metadata_optimization': self.enable_metadata_opt,
                   'endpoint_group_name': epg_name,
                   'host': host,
                   'l3_policy_id': network['tenant_id'],  # TODO(rkukura)
                   'mac_address': port['mac_address'],
                   'port_id': port_id,
                   'promiscuous_mode': promiscuous_mode,
                   'subnets': self._get_subnet_details(core_plugin, context,
                                                       port),
                   'ptg_tenant': epg_tenant_name}
        if port['device_owner'].startswith('compute:') and port['device_id']:
            # REVISIT(rkukura): Do we need to map to name using nova client?
            details['vm-name'] = port['device_id']

        # TODO(rkukura): Mark active allowed_address_pairs

        # TODO(rkukura): Add the following details common to the old
        # GBP and ML2 drivers: floating_ip, host_snat_ips, ip_mapping,
        # vrf_name, vrf_subnets, vrf_tenant.

        # TODO(rkukura): Add the following details unique to the old
        # ML2 driver: attestation, interface_mtu.

        # TODO(rkukura): Add the following details unique to the old
        # GBP driver: extra_details, extra_ips, fixed_ips,
        # l2_policy_id.

        return details
def _get_subnet_details(self, core_plugin, context, port):
    """Return the port's subnets, augmented with DHCP/routing details.

    For each subnet the port has a fixed IP on, this adds:
    - 'dhcp_server_ips': IPs of DHCP ports on that subnet
    - 'dns_nameservers': defaulted to the DHCP IPs when unset
    - 'host_routes': augmented with a default route and (unless
      metadata optimization is enabled) a metadata route when
      missing; IPv4 subnets only.
    """
    subnets = core_plugin.get_subnets(
        context,
        filters={'id': [ip['subnet_id'] for ip in port['fixed_ips']]})
    for subnet in subnets:
        # Collect the DHCP server IPs on this subnet. Bug fix: the
        # loop variable was named 'port', shadowing the port argument.
        dhcp_ips = set()
        for dhcp_port in core_plugin.get_ports(
                context, filters={
                    'network_id': [subnet['network_id']],
                    'device_owner': [n_constants.DEVICE_OWNER_DHCP]}):
            dhcp_ips |= set([x['ip_address']
                             for x in dhcp_port['fixed_ips']
                             if x['subnet_id'] == subnet['id']])
        dhcp_ips = list(dhcp_ips)
        if not subnet['dns_nameservers']:
            # Use DHCP namespace port IP
            subnet['dns_nameservers'] = dhcp_ips

        # Set default route if needed (IPv4 only).
        metadata = default = False
        if subnet['ip_version'] == 4:
            for route in subnet['host_routes']:
                if route['destination'] == '0.0.0.0/0':
                    default = True
                if route['destination'] == dhcp.METADATA_DEFAULT_CIDR:
                    metadata = True
            # Set missing routes
            if not default:
                subnet['host_routes'].append(
                    {'destination': '0.0.0.0/0',
                     'nexthop': subnet['gateway_ip']})
            if not metadata and dhcp_ips and not self.enable_metadata_opt:
                subnet['host_routes'].append(
                    {'destination': dhcp.METADATA_DEFAULT_CIDR,
                     'nexthop': dhcp_ips[0]})
        subnet['dhcp_server_ips'] = dhcp_ips
    return subnets
def _merge_status(self, sync_state, status):
    """Fold an AIM resource status into an overall sync state.

    :param sync_state: current aggregate sync state
    :param status: AIM status object (provides is_error()/is_build())
    :returns: the merged sync state; error dominates build, which
              dominates synced.

    Bug fix: the original reassigned only its local parameter and
    returned None, so the merge was invisible to callers. The merged
    state is now returned; callers must capture the result
    (rebinding a local never propagated in the first place, so
    returning a value is backward-compatible).
    """
    if status.is_error():
        sync_state = cisco_apic.SYNC_ERROR
    elif status.is_build() and sync_state is not cisco_apic.SYNC_ERROR:
        sync_state = cisco_apic.SYNC_BUILD
    return sync_state
def _gateway_ip_mask(self, subnet):
gateway_ip = subnet['gateway_ip']
if gateway_ip:
prefix_len = subnet['cidr'].split('/')[1]
return gateway_ip + '/' + prefix_len

View File

@ -0,0 +1,63 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from apic_ml2.neutron.plugins.ml2.drivers.cisco.apic import (
apic_model as old_model)
from neutron._i18n import _LI
from oslo_log import log
from sqlalchemy import orm
LOG = log.getLogger(__name__)
# REVISIT(rkukura): Temporarily using ApicName model defined in old
# apic-ml2 driver with migration in neutron. We should define our
# own, and may want to switch to per-resource name mapping tables with
# foreign keys.
class DbModel(object):
    """Persists Neutron-resource-ID to APIC-name mappings.

    REVISIT(rkukura): Temporarily backed by the ApicName table defined
    by the old apic-ml2 driver (see module-level comment).
    """

    def __init__(self):
        LOG.info(_LI("APIC AIM DbModel __init__"))

    def add_apic_name(self, session, neutron_id, neutron_type, apic_name):
        """Persist a (neutron_id, neutron_type) -> apic_name mapping."""
        name = old_model.ApicName(neutron_id=neutron_id,
                                  neutron_type=neutron_type,
                                  apic_name=apic_name)
        with session.begin(subtransactions=True):
            session.add(name)

    def get_apic_name(self, session, neutron_id, neutron_type):
        """Return the first (apic_name,) row for the mapping, or None."""
        return session.query(old_model.ApicName.apic_name).filter_by(
            neutron_id=neutron_id, neutron_type=neutron_type).first()

    def delete_apic_name(self, session, neutron_id):
        """Delete all name mappings for neutron_id (no-op if none exist).

        Bulk Query.delete() returns the number of matched rows and
        never raises NoResultFound, so the original try/except
        NoResultFound around it was dead code and has been removed.
        """
        with session.begin(subtransactions=True):
            session.query(old_model.ApicName).filter_by(
                neutron_id=neutron_id).delete()

    def get_filtered_apic_names(self, session, neutron_id=None,
                                neutron_type=None, apic_name=None):
        """Return all (apic_name,) rows matching the provided filters.

        Each of neutron_id, neutron_type and apic_name is applied as a
        filter only when truthy.
        """
        query = session.query(old_model.ApicName.apic_name)
        if neutron_id:
            query = query.filter_by(neutron_id=neutron_id)
        if neutron_type:
            query = query.filter_by(neutron_type=neutron_type)
        if apic_name:
            query = query.filter_by(apic_name=apic_name)
        return query.all()

View File

@ -0,0 +1,294 @@
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aim.db import model_base as aim_model_base
from keystoneclient.v3 import client as ksc_client
from neutron import context
from neutron.db import api as db_api
from neutron import manager
from neutron.plugins.ml2 import config
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from opflexagent import constants as ofcst
# Use the extended ML2 plugin that supports the AIM mechanism driver.
PLUGIN_NAME = 'gbpservice.neutron.plugins.ml2plus.plugin.Ml2PlusPlugin'

# Agent state reported for the fake OpFlex OVS agent used by the
# port-binding tests; opflex_networks of None means the agent serves
# every physical network.
AGENT_CONF_OPFLEX = {'alive': True, 'binary': 'somebinary',
                     'topic': 'sometopic',
                     'agent_type': ofcst.AGENT_TYPE_OPFLEX_OVS,
                     'configurations': {
                         'opflex_networks': None,
                         'bridge_mappings': {'physnet1': 'br-eth1'}}}
# REVISIT(rkukura): Use mock for this instead?
class FakeTenant(object):
    """Minimal stand-in for a keystone project record (id + name)."""

    def __init__(self, id, name):
        self.id, self.name = id, name
class FakeProjectManager(object):
    """Mimics keystoneclient's project manager for the tests.

    list() returns the fixed set of projects the name-mapping tests
    rely on.
    """

    def list(self):
        fixtures = [('test-tenant', 'TestTenantName'),
                    ('bad_tenant_id', 'BadTenantName')]
        return [FakeTenant(id, name) for id, name in fixtures]
class FakeKeystoneClient(object):
    """Drop-in replacement for keystoneclient.v3.client.Client.

    Only the .projects manager is used by the code under test; all
    constructor arguments are accepted and ignored.
    """

    def __init__(self, **kwargs):
        self.projects = FakeProjectManager()
class ApicAimTestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base test case wiring the ml2plus plugin with apic_aim drivers.

    Configures ML2 to load the apic_aim mechanism and extension
    drivers, fakes out keystone, and creates the AIM tables in the
    test database.
    """

    def setUp(self):
        # Enable the test mechanism driver to ensure that
        # we can successfully call through to all mechanism
        # driver apis.
        # NOTE: all config overrides must precede super().setUp(),
        # which loads the plugin using these options.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'apic_aim'],
                                     'ml2')
        config.cfg.CONF.set_override('extension_drivers',
                                     ['apic_aim'],
                                     'ml2')
        config.cfg.CONF.set_override('type_drivers',
                                     ['opflex', 'local', 'vlan'],
                                     'ml2')
        config.cfg.CONF.set_override('tenant_network_types',
                                     ['opflex'],
                                     'ml2')
        config.cfg.CONF.set_override('network_vlan_ranges',
                                     ['physnet1:1000:1099'],
                                     group='ml2_type_vlan')
        super(ApicAimTestCase, self).setUp(PLUGIN_NAME)
        self.port_create_status = 'DOWN'

        # Replace the keystone client with a fake; restored in tearDown.
        self.saved_keystone_client = ksc_client.Client
        ksc_client.Client = FakeKeystoneClient

        # Create the AIM tables in the test database.
        engine = db_api.get_engine()
        aim_model_base.Base.metadata.create_all(engine)

        self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.start_rpc_listeners()
        # Keep a handle on the mechanism driver so tests can call its
        # RPC methods directly.
        self.driver = self.plugin.mechanism_manager.mech_drivers[
            'apic_aim'].obj

    def tearDown(self):
        ksc_client.Client = self.saved_keystone_client
        super(ApicAimTestCase, self).tearDown()
class TestApicExtension(ApicAimTestCase):
    """Verify the apic:distinguished_names extension on networks/subnets."""

    def _verify_dn(self, dist_names, key, mo_types, id):
        # The DN must start with 'uni/', contain each expected managed
        # object type prefix, and embed the given id fragment.
        dn = dist_names.get(key)
        self.assertIsInstance(dn, basestring)
        self.assertEqual('uni/', dn[:4])
        for mo_type in mo_types:
            self.assertIn('/' + mo_type + '-', dn)
        self.assertIn(id, dn)

    def _verify_no_dn(self, dist_names, key):
        # The key must be present but map to None.
        self.assertIn(key, dist_names)
        self.assertIsNone(dist_names.get(key))

    def _verify_network_dist_names(self, net):
        id = net['id']
        dist_names = net.get('apic:distinguished_names')
        self.assertIsInstance(dist_names, dict)
        self._verify_dn(dist_names, 'BridgeDomain', ['tn', 'BD'], id[:5])
        self._verify_dn(dist_names, 'EndpointGroup', ['tn', 'ap', 'epg'],
                        id[:5])

    def test_network(self):
        # Test create.
        net = self._make_network(self.fmt, 'net1', True)['network']
        net_id = net['id']
        self._verify_network_dist_names(net)

        # Test show.
        res = self._show('networks', net_id)['network']
        self._verify_network_dist_names(res)

        # Test update.
        data = {'network': {'name': 'newnamefornet'}}
        res = self._update('networks', net_id, data)['network']
        self._verify_network_dist_names(res)

    def _verify_subnet_dist_names(self, subnet):
        dist_names = subnet.get('apic:distinguished_names')
        self.assertIsInstance(dist_names, dict)
        if subnet['gateway_ip']:
            # The AIM Subnet is identified by its gateway IP and mask.
            id = subnet['gateway_ip'] + '/' + subnet['cidr'].split('/')[1]
            self._verify_dn(dist_names, 'Subnet', ['tn', 'BD', 'subnet'], id)
        else:
            # Subnets without a gateway map to no AIM Subnet.
            self._verify_no_dn(dist_names, 'Subnet')

    def test_subnet_without_gw(self):
        # Test create without gateway.
        net = self._make_network(self.fmt, 'net', True)
        pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        subnet = self._make_subnet(self.fmt, net, None,
                                   '10.0.0.0/24',
                                   allocation_pools=pools)['subnet']
        subnet_id = subnet['id']
        self._verify_subnet_dist_names(subnet)

        # Test show.
        res = self._show('subnets', subnet_id)['subnet']
        self._verify_subnet_dist_names(res)

        # Test update.
        data = {'subnet': {'name': 'newnameforsubnet'}}
        res = self._update('subnets', subnet_id, data)['subnet']
        self._verify_subnet_dist_names(res)

        # Test update adding gateway.
        data = {'subnet': {'gateway_ip': '10.0.0.1'}}
        res = self._update('subnets', subnet_id, data)['subnet']
        self._verify_subnet_dist_names(res)

        # Test show after adding gateway.
        res = self._show('subnets', subnet_id)['subnet']
        self._verify_subnet_dist_names(res)

    def test_subnet_with_gw(self):
        # Test create.
        net = self._make_network(self.fmt, 'net', True)
        subnet = self._make_subnet(self.fmt, net, '10.0.1.1',
                                   '10.0.1.0/24')['subnet']
        subnet_id = subnet['id']
        self._verify_subnet_dist_names(subnet)

        # Test show.
        res = self._show('subnets', subnet_id)['subnet']
        self._verify_subnet_dist_names(res)

        # Test update.
        data = {'subnet': {'name': 'newnameforsubnet'}}
        res = self._update('subnets', subnet_id, data)['subnet']
        self._verify_subnet_dist_names(res)

        # Test update removing gateway.
        data = {'subnet': {'gateway_ip': None}}
        res = self._update('subnets', subnet_id, data)['subnet']
        self._verify_subnet_dist_names(res)

        # Test show after removing gateway.
        res = self._show('subnets', subnet_id)['subnet']
        self._verify_subnet_dist_names(res)
class TestPortBinding(ApicAimTestCase):
    """Exercise port binding and the GBP details RPC handlers."""

    def _register_agent(self, host, agent_conf):
        # Register a fake agent on the given host so binding can
        # find it via host_agents().
        agent = {'host': host}
        agent.update(agent_conf)
        self.plugin.create_or_update_agent(context.get_admin_context(), agent)

    def _bind_port_to_host(self, port_id, host):
        # Simulate nova binding the port to a compute host.
        data = {'port': {'binding:host_id': host,
                         'device_owner': 'compute:',
                         'device_id': 'someid'}}
        req = self.new_update_request('ports', data, port_id,
                                      self.fmt)
        return self.deserialize(self.fmt, req.get_response(self.api))

    def test_bind_opflex_agent(self):
        self._register_agent('host1', AGENT_CONF_OPFLEX)
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')
        port = self._make_port(self.fmt, net['network']['id'])['port']
        port_id = port['id']
        port = self._bind_port_to_host(port_id, 'host1')['port']
        self.assertEqual('ovs', port['binding:vif_type'])
        self.assertEqual({'port_filter': False, 'ovs_hybrid_plug': False},
                         port['binding:vif_details'])

        # Verify get_gbp_details.
        device = 'tap%s' % port_id
        details = self.driver.get_gbp_details(context.get_admin_context(),
                                              device=device, host='host1')
        self.assertEqual(port['allowed_address_pairs'],
                         details['allowed_address_pairs'])
        self.assertEqual('NeutronAP', details['app_profile_name'])
        self.assertEqual(device, details['device'])
        self.assertTrue(details['enable_dhcp_optimization'])
        self.assertTrue(details['enable_metadata_optimization'])
        self.assertIn('net1', details['endpoint_group_name'])
        self.assertEqual('host1', details['host'])
        self.assertEqual('test-tenant', details['l3_policy_id'])
        self.assertEqual(port['mac_address'], details['mac_address'])
        self.assertEqual(port_id, details['port_id'])
        self.assertFalse(details['promiscuous_mode'])
        self.assertIn('TestTenantName', details['ptg_tenant'])
        self.assertEqual(1, len(details['subnets']))
        self.assertEqual('someid', details['vm-name'])

        # Verify request_endpoint_details.
        req_details = self.driver.request_endpoint_details(
            context.get_admin_context(),
            request={'device': 'tap%s' % port_id, 'host': 'host1',
                     'timestamp': 0, 'request_id': 'a_request_id'})
        self.assertEqual(details, req_details['gbp_details'])
        self.assertEqual(port_id, req_details['neutron_details']['port_id'])

        # TODO(rkukura): Verify subnet details. Also, test with
        # variations of DHCP IPs on subnet, dns_nameservers and
        # host_routes values, etc..

    # TODO(rkukura): Add tests for promiscuous_mode cases.

    def test_bind_unsupported_vnic_type(self):
        # A macvtap VNIC must be refused; binding should fail.
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')
        vnic_arg = {'binding:vnic_type': 'macvtap'}
        port = self._make_port(self.fmt, net['network']['id'],
                               arg_list=('binding:vnic_type',),
                               **vnic_arg)['port']
        port = self._bind_port_to_host(port['id'], 'host1')['port']
        self.assertEqual('binding_failed', port['binding:vif_type'])

    # TODO(rkukura): Add tests for opflex, local and unsupported
    # network_type values.
# TODO(rkukura): Add tests for opflex, local and unsupported
# network_type values.
# Run the standard ML2 unit test suites against the ml2plus plugin
# with the apic_aim mechanism and extension drivers enabled, verifying
# that the drivers do not break base plugin behavior.
class TestMl2BasicGet(test_plugin.TestBasicGet,
                      ApicAimTestCase):
    pass


class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,
                            ApicAimTestCase):
    pass


class TestMl2PortsV2(test_plugin.TestPortsV2,
                     ApicAimTestCase):
    pass


class TestMl2NetworksV2(test_plugin.TestNetworksV2,
                        ApicAimTestCase):
    pass


class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
                       ApicAimTestCase):
    pass


class TestMl2SubnetPoolsV2(test_plugin.TestSubnetPoolsV2,
                           ApicAimTestCase):
    pass

View File

@ -35,8 +35,6 @@ from opflexagent import constants as ocst
from oslo_config import cfg
from oslo_serialization import jsonutils
sys.modules["apicapi"] = mock.Mock()
from gbpservice.neutron.plugins.ml2.drivers.grouppolicy.apic import driver
from gbpservice.neutron.services.grouppolicy import (
group_policy_context as p_context)
@ -97,6 +95,8 @@ class ApicMappingTestCase(
def setUp(self, sc_plugin=None, nat_enabled=True,
pre_existing_l3out=False, default_agent_conf=True,
ml2_options=None):
self.saved_apicapi = sys.modules["apicapi"]
sys.modules["apicapi"] = mock.Mock()
if default_agent_conf:
self.agent_conf = AGENT_CONF
cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
@ -240,6 +240,10 @@ class ApicMappingTestCase(
self.driver.apic_manager.apic.fvCtx.name = echo2
self._db_plugin = n_db.NeutronDbPluginV2()
def tearDown(self):
sys.modules["apicapi"] = self.saved_apicapi
super(ApicMappingTestCase, self).tearDown()
def _build_external_dict(self, name, cidr_exposed, is_edge_nat=False):
ext_info = {
'enable_nat': 'True' if self.nat_enabled else 'False'

View File

@ -59,10 +59,13 @@ gbpservice.neutron.group_policy.policy_drivers =
nuage_gbp_driver = gbpservice.neutron.services.grouppolicy.drivers.nuage.driver:NuageGBPDriver
neutron.ml2.mechanism_drivers =
logger_plus = gbpservice.neutron.tests.unit.plugins.ml2plus.drivers.mechanism_logger:LoggerPlusMechanismDriver
apic_aim = gbpservice.neutron.plugins.ml2plus.drivers.apic_aim.mechanism_driver:ApicMechanismDriver
apic_gbp = gbpservice.neutron.plugins.ml2.drivers.grouppolicy.apic.driver:APICMechanismGBPDriver
nuage_gbp = gbpservice.neutron.plugins.ml2.drivers.grouppolicy.nuage.driver:NuageMechanismGBPDriver
odl_gbp = gbpservice.neutron.plugins.ml2.drivers.grouppolicy.odl.driver:OdlMechanismGBPDriver
stitching_gbp = gbpservice.neutron.plugins.ml2.drivers.grouppolicy.stitching.driver:TrafficStitchingMechanismGBPDriver
neutron.ml2.extension_drivers =
apic_aim = gbpservice.neutron.plugins.ml2plus.drivers.apic_aim.extension_driver:ApicExtensionDriver
gbpservice.neutron.servicechain.servicechain_drivers =
dummy = gbpservice.neutron.services.servicechain.plugins.msc.drivers.dummy_driver:NoopDriver
simplechain_driver = gbpservice.neutron.services.servicechain.plugins.msc.drivers.simplechain_driver:SimpleChainDriver

View File

@ -27,6 +27,7 @@ os-testr>=0.4.1 # Apache-2.0
ddt>=1.0.1 # MIT
pylint==1.4.5 # GNU GPL v2
reno>=0.1.1 # Apache2
pyOpenSSL>=0.13.0,<=0.15.1
# Since version numbers for these are specified in
# https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt,
@ -35,3 +36,4 @@ python-heatclient
python-keystoneclient
-e git+https://github.com/noironetworks/python-opflex-agent.git@master#egg=python-opflexagent-agent
-e git+https://github.com/noironetworks/aci-integration-module.git#egg=aci-integration-module