[VMware] VMware NSX Policy driver

This introduces a driver for VMware NSX Policy.
The driver assumes nsx_v3 core plugin.
It implements direct configuration of NSX Policy endpoint for security
and inherits connectivity functionality from resource mapping driver.

On startup, the driver will configure the NSX Policy enforcement point to be
the NSX manager that the core plugin is running against.

The driver implements the following resource mapping:

Openstack project => NSX Policy domain
GBP group => NSX Policy group + communication maps
GBP classifier => NSX Policy service
GBP rule set => NSX Policy communication profile

Change-Id: I0d5593b458f7e51c21fc2b34d1ab4d898abb6c51
This commit is contained in:
Anna Khmelnitsky 2017-04-05 16:38:42 -07:00
parent 989ddeca5a
commit e30de6e13d
11 changed files with 1492 additions and 0 deletions

25
devstack/lib/nsx Normal file
View File

@ -0,0 +1,25 @@
# TODO(annak): This is a temporary solution
# nsxlib, which wraps policy API, is under development
# since policy API is yet to be finalized
# we prefer to run against master branch at this point
# Register vmware-nsxlib with devstack's git machinery and install it
# from source when LIBS_FROM_GIT requests it.
function prepare_nsx_policy {
    NSXLIB_NAME='vmware-nsxlib'
    GITDIR[$NSXLIB_NAME]=/opt/stack/vmware-nsxlib
    # Allow the repo/branch to be overridden from local.conf
    GITREPO[$NSXLIB_NAME]=${NSXLIB_REPO:-${GIT_BASE}/openstack/vmware-nsxlib.git}
    GITBRANCH[$NSXLIB_NAME]=${NSXLIB_BRANCH:-master}
    if use_library_from_git $NSXLIB_NAME; then
        git_clone_by_name $NSXLIB_NAME
        setup_dev_lib $NSXLIB_NAME
    fi
}
# Point neutron at the NSXv3 core plugin and enable the nsx_policy GBP
# driver, then write NSX Policy connection credentials into the plugin
# config ([NSX_POLICY] section, read by the driver's policy_opts).
function nsx_configure_neutron {
    iniset $NEUTRON_CONF DEFAULT core_plugin "vmware_nsx.plugin.NsxV3Plugin"
    iniset $NEUTRON_CONF group_policy policy_drivers "implicit_policy,nsx_policy"
    iniset /$Q_PLUGIN_CONF_FILE NSX_POLICY nsx_policy_manager $NSX_POLICY_MANAGER
    iniset /$Q_PLUGIN_CONF_FILE NSX_POLICY nsx_policy_username $NSX_POLICY_USERNAME
    iniset /$Q_PLUGIN_CONF_FILE NSX_POLICY nsx_policy_password $NSX_POLICY_PASSWORD
    iniset /$Q_PLUGIN_CONF_FILE NSX_POLICY nsx_manager_thumbprint $NSX_MANAGER_THUMBPRINT
}

View File

@ -100,11 +100,21 @@ if is_service_enabled group-policy; then
echo_summary "Installing $NFP"
prepare_nfp_image_builder
fi
if [[ $ENABLE_NSX_POLICY = True ]]; then
echo_summary "Installing NSX Policy requirements"
prepare_nsx_policy
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring $GBP"
[[ $ENABLE_APIC_AIM_GATE = False ]] && gbp_configure_nova
[[ $ENABLE_APIC_AIM_GATE = False ]] && gbp_configure_heat
gbp_configure_neutron
if [[ $ENABLE_NSX_POLICY = True ]]; then
echo_summary "Configuring NSX"
nsx_configure_neutron
fi
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Configuring $NFP"
nfp_configure_neutron

View File

@ -5,6 +5,7 @@ ENABLE_APIC_AIM=${ENABLE_APIC_AIM:-False}
ENABLE_APIC_AIM_GATE=${ENABLE_APIC_AIM_GATE:-False}
[[ $ENABLE_APIC_AIM = True ]] && source $DEST/group-based-policy/devstack/lib/apic_aim
[[ $ENABLE_APIC_AIM_GATE = True ]] && source $DEST/group-based-policy/devstack/lib/apic_aim
[[ $ENABLE_NSX_POLICY = True ]] && source $DEST/group-based-policy/devstack/lib/nsx
ENABLE_NFP=${ENABLE_NFP:-False}
[[ $ENABLE_NFP = True ]] && NFP_DEVSTACK_MODE=${NFP_DEVSTACK_MODE:-base}
@ -70,6 +71,12 @@ if [[ $ENABLE_NFP = True ]]; then
[[ $NFP_DEVSTACK_MODE = base ]] && enable_service nfp_base_configurator
[[ $NFP_DEVSTACK_MODE != base ]] && enable_service nfp_config_orchestrator
fi
if [[ $ENABLE_NSX_POLICY = True ]]; then
disable_service q-meta
disable_service q-dhcp
disable_service q-l3
disable_service q-agt
fi
OVS_PHYSICAL_BRIDGE=br-ex

View File

@ -0,0 +1,48 @@
..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
NSX Policy Driver
===================
The NSX Policy driver utilizes the VMware NSX Policy API to provide integration
between Neutron and the VMware NSX Policy solution. The driver assumes the
NSXv3 core plugin, which operates against the NSXv3 manager.
The first phase of support configures security resources on NSX Policy. Connectivity
configuration is enforced via Neutron objects, using behavior inherited from the
resource mapping driver.
Currently, the following GBP -> NSX Policy mappings are implemented:
project -> domain, deployment map
policy classifier -> service
policy rule set -> communication profile
group -> group, communication maps
Note that while neutron security groups are not created to enforce inter-group
connectivity, a single security group per GBP group will be created, for the sake
of connectivity within the group.
DevStack Support
----------------
In order to enable NSX Policy driver, add the following to local.conf when
running devstack::
enable_plugin gbp https://git.openstack.org/openstack/group-based-policy master
ENABLE_NSX_POLICY=True
NSX_POLICY_MANAGER = <nsx policy API IP address>
NSX_POLICY_USERNAME = <nsx policy username>
NSX_POLICY_PASSWORD = <nsx policy password>
NSX_MANAGER = <nsx manager API IP address>
NSX_USER = <nsx manager user>
NSX_PASSWORD = <nsx manager password>
NSX_MANAGER_THUMBPRINT = <thumbprint>
DEFAULT_OVERLAY_TZ_UUID = <default overlay transport zone uuid>
DHCP_PROFILE_UUID = <dhcp profile uuid>
METADATA_PROXY_UUID = <metadata proxy uuid>
DEFAULT_TIER0_ROUTER_UUID = <default tier 0 router uuid>

View File

@ -23,6 +23,7 @@ from neutron.extensions import securitygroup as ext_sg
from neutron_lib import constants as n_const
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as oslo_db_excp
from oslo_log import helpers as log
@ -1381,6 +1382,12 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
self._cached_agent_notifier = None
self._resource_owner_tenant_id = None
@property
def gbp_plugin(self):
    """Lazily look up the group policy service plugin.

    Cached on first access; the plugin directory is only consulted once.
    """
    if not self._gbp_plugin:
        self._gbp_plugin = directory.get_plugin("GROUP_POLICY")
    return self._gbp_plugin
def _reject_shared(self, object, type):
if object.get('shared'):
raise exc.InvalidSharedResource(type=type,

View File

@ -0,0 +1,531 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from vmware_nsx.db import db as nsx_db
from vmware_nsxlib import v3
from vmware_nsxlib.v3 import config
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsxlib.v3 import resources as nsx_resources
from gbpservice.neutron.services.grouppolicy.common import constants as g_const
from gbpservice.neutron.services.grouppolicy.common import exceptions as gpexc
from gbpservice.neutron.services.grouppolicy.drivers import (
resource_mapping as api)
LOG = logging.getLogger(__name__)
# Single id used for the one enforcement point / deployment map the
# driver maintains per backend.
SINGLE_ENTRY_ID = 'GBP'
DRIVER_NAME = 'NSX Policy driver'
# Config section holding the NSX Policy credentials (see policy_opts).
DRIVER_OPT_GROUP = 'NSX_POLICY'
# Config section of the NSXv3 core plugin; reused for manager credentials.
NSX_V3_GROUP = 'nsx_v3'

# Connection options for the NSX Policy manager appliance.
policy_opts = [
    cfg.StrOpt('nsx_policy_manager',
               help=_("Nsx Policy manager IP address or host.")),
    cfg.StrOpt('nsx_policy_username',
               help=_("Nsx Policy username.")),
    cfg.StrOpt('nsx_policy_password',
               help=_("Nsx Policy password.")),
    cfg.StrOpt('nsx_manager_thumbprint',
               help=_("Thumbprint of nsx manager"))
]

cfg.CONF.register_opts(policy_opts, DRIVER_OPT_GROUP)
class HierarchicalContractsNotSupported(gpexc.GroupPolicyBadRequest):
    """Raised when a policy rule set declares child rule sets."""
    message = ("Hierarchy in rule sets is not supported with %s." %
               DRIVER_NAME)


class UpdateOperationNotSupported(gpexc.GroupPolicyBadRequest):
    """Raised for update operations the backend cannot express."""
    message = ("Update operation on this object is not supported with %s." %
               DRIVER_NAME)


class ProxyGroupsNotSupported(gpexc.GroupPolicyBadRequest):
    """Raised when a policy target group is a proxy group."""
    message = ("Proxy groups are not supported with %s." % DRIVER_NAME)
def in_name(name):
    """Return the ingress-direction backend name for a GBP id or name."""
    suffix = '_I'
    return name + suffix
def out_name(name):
    """Return the egress-direction backend name for a GBP id or name."""
    suffix = '_O'
    return name + suffix
class NsxPolicyMappingDriver(api.ResourceMappingDriver):
    """Nsx Policy Mapping driver for Group Policy plugin.

    This mapping driver is only supported with nsxv3 core plugin.
    NSX Manager is the network virtualization appliance configured by the
    core plugin.
    NSX Policy is a separate appliance that provides grouping API. Behind
    the scenes, NSX Policy configures the same NSX manager.

    At the current phase of development, security is achieved via NSX
    Policy, while connectivity functionality is inherited from the
    resource mapping driver.

    This driver configures services, connectivity rules and grouping
    objects on NSX Policy. In addition, it configures a logical port tag
    directly on NSX manager, in order to provide port membership in the
    desired group.

    The driver does not maintain state of its own (no db extension). This
    is for the sake of reducing failure recovery problems, at the cost of
    a few more backend roundtrips.
    """
def get_nsxpolicy_lib(self):
    """Build an NsxPolicyLib client from the [NSX_POLICY] config options."""
    nsxlib_config = config.NsxLibConfig(
        nsx_api_managers=[cfg.CONF.NSX_POLICY.nsx_policy_manager],
        username=cfg.CONF.NSX_POLICY.nsx_policy_username,
        password=cfg.CONF.NSX_POLICY.nsx_policy_password)
    return v3.NsxPolicyLib(nsxlib_config)

def get_nsxmanager_client(self):
    """Build a direct NSX Manager client from the core plugin's [nsx_v3] opts."""
    nsxlib_config = config.NsxLibConfig(
        nsx_api_managers=cfg.CONF.nsx_v3.nsx_api_managers,
        username=cfg.CONF.nsx_v3.nsx_api_user,
        password=cfg.CONF.nsx_v3.nsx_api_password)
    return v3.NsxLib(nsxlib_config).client

def initialize(self):
    """Set up backend clients and ensure the enforcement point exists."""
    super(NsxPolicyMappingDriver, self).initialize()
    self._gbp_plugin = None
    # Client for the NSX Policy appliance (security/grouping API)
    self.nsx_policy = self.get_nsxpolicy_lib()
    self.policy_api = self.nsx_policy.policy_api

    # Direct manager client, only used to tag logical ports
    nsx_manager_client = self.get_nsxmanager_client()
    self.nsx_port = nsx_resources.LogicalPort(nsx_manager_client)
    self._verify_enforcement_point()

    # TODO(annak): add validation for core plugin (can only be nsxv3)
def _verify_enforcement_point(self):
    """Configure NSX Policy to enforce grouping rules on NSX Manager.

    Idempotent: if an enforcement point for the manager's address already
    exists on the backend, nothing is created.
    """
    # We only support a single NSX manager at this point
    nsx_manager_ip = cfg.CONF.nsx_v3.nsx_api_managers[0]
    nsx_manager_username = cfg.CONF.nsx_v3.nsx_api_user[0]
    nsx_manager_password = cfg.CONF.nsx_v3.nsx_api_password[0]
    nsx_manager_thumbprint = cfg.CONF.NSX_POLICY.nsx_manager_thumbprint
    epoints = self.nsx_policy.enforcement_point.list()
    for ep in epoints:
        for conn in ep['connection_info']:
            if conn['enforcement_point_address'] == nsx_manager_ip:
                LOG.debug('Enforcement point for %s already exists (%s)',
                          nsx_manager_ip, ep['id'])
                return

    LOG.info('Creating enforcement point for %s', nsx_manager_ip)
    self.nsx_policy.enforcement_point.create_or_overwrite(
        name=nsx_manager_ip,
        ep_id=SINGLE_ENTRY_ID,
        ip_address=nsx_manager_ip,
        username=nsx_manager_username,
        password=nsx_manager_password,
        thumbprint=nsx_manager_thumbprint)
def _generate_nsx_name(self, object_id, object_name):
    """Compose a backend display name: '<name>_<id>', or the id alone."""
    if not object_name:
        return object_id
    return object_name + '_' + object_id
def _create_domain(self, context):
    """Create a backend domain + deployment map for the current project.

    The domain id is the project id; the deployment map binds the domain
    to the single enforcement point.
    """
    project_id = context.current['project_id']
    tenant_name = context._plugin_context.tenant_name
    domain_name = self._generate_nsx_name(project_id, tenant_name)

    LOG.info('Creating domain %(domain)s for project %(project)s',
             {'domain': domain_name,
              'project': project_id})

    self.nsx_policy.domain.create_or_overwrite(
        name=domain_name,
        domain_id=project_id,
        description=_('Domain for tenant %s') % tenant_name)

    self.nsx_policy.deployment_map.create_or_overwrite(
        name=domain_name,
        map_id=project_id,
        domain_id=project_id,
        ep_id=SINGLE_ENTRY_ID)

def _delete_domain(self, project_id):
    """Best-effort removal of the project's deployment map and domain.

    Missing backend objects are logged and ignored so cleanup can proceed.
    """
    try:
        self.nsx_policy.deployment_map.delete(project_id)
    except nsxlib_exc.ResourceNotFound:
        LOG.warning('Domain %s is not deployed on backend',
                    project_id)

    try:
        self.nsx_policy.domain.delete(project_id)
    except nsxlib_exc.ResourceNotFound:
        LOG.warning('Domain %s was not found on backend',
                    project_id)
def _create_or_update_communication_profile(self, profile_id, name,
                                            description, rules,
                                            update_flow=False):
    """Create/overwrite a communication profile from GBP policy rules.

    Each rule contributes its classifier id as a backend service.
    NOTE(review): ``update_flow`` is accepted but unused here — the
    create_or_overwrite call covers both flows; confirm before relying
    on it.
    """
    services = [rule['policy_classifier_id']
                for rule in rules]

    self.nsx_policy.comm_profile.create_or_overwrite(
        name=name,
        profile_id=profile_id,
        description=description,
        services=services)
def _split_rules_by_direction(self, context, rules):
    """Split policy rules into (ingress, egress) lists by classifier direction.

    Bidirectional classifiers land in both lists; backend services are
    single-direction, so direction is enforced at profile level.
    """
    in_dir = [g_const.GP_DIRECTION_BI, g_const.GP_DIRECTION_IN]
    out_dir = [g_const.GP_DIRECTION_BI, g_const.GP_DIRECTION_OUT]

    in_rules = []
    out_rules = []

    for rule in rules:
        classifier = context._plugin.get_policy_classifier(
            context._plugin_context,
            rule['policy_classifier_id'])
        direction = classifier['direction']
        if direction in in_dir:
            in_rules.append(rule)

        if direction in out_dir:
            out_rules.append(rule)

    return in_rules, out_rules
def _delete_comm_profile(self, comm_profile_id):
    """Delete a communication profile; log (don't raise) if it is missing."""
    try:
        self.nsx_policy.comm_profile.delete(comm_profile_id)
    except nsxlib_exc.ResourceNotFound:
        LOG.error('Communication profile %s not found on backend',
                  comm_profile_id)
def _create_or_update_policy_rule_set(self, context, update_flow=False):
    """Map a GBP rule set to ingress/egress communication profiles.

    On update, a direction whose rule list became empty gets its
    profile deleted from the backend.
    """
    rule_set_id = context.current['id']
    rules = self.gbp_plugin.get_policy_rules(
        context._plugin_context,
        {'id': context.current['policy_rules']})

    in_rules, out_rules = self._split_rules_by_direction(context, rules)

    if in_rules:
        self._create_or_update_communication_profile(
            in_name(rule_set_id),
            in_name(context.current['name']),
            context.current['description'] + '(ingress)',
            in_rules)
    elif update_flow:
        self._delete_comm_profile(in_name(rule_set_id))

    if out_rules:
        self._create_or_update_communication_profile(
            out_name(rule_set_id),
            out_name(context.current['name']),
            context.current['description'] + '(egress)',
            out_rules)
    elif update_flow:
        self._delete_comm_profile(out_name(rule_set_id))
def _filter_ptgs_by_ruleset(self, ptgs, ruleset_id):
    """Partition group ids into providers and consumers of a rule set."""
    providing_ptgs = []
    consuming_ptgs = []
    for ptg in ptgs:
        if ruleset_id in ptg['provided_policy_rule_sets']:
            providing_ptgs.append(ptg['id'])
        if ruleset_id in ptg['consumed_policy_rule_sets']:
            consuming_ptgs.append(ptg['id'])
    return providing_ptgs, consuming_ptgs
def _map_rule_set(self, ptgs, profiles, project_id,
                  group_id, ruleset_id, delete_flow):
    """Sync backend communication maps for one rule set.

    Creates ingress/egress maps connecting consuming groups to providing
    groups, or deletes them when one side disappears during a delete flow.

    :param ptgs: all policy target groups visible in this context
    :param profiles: ids of communication profiles existing on backend
    :param project_id: backend domain id
    :param group_id: id of the group being (dis)associated
        (currently unused; kept for interface stability)
    :param ruleset_id: the GBP rule set being mapped
    :param delete_flow: True when called from a disassociation path
    """
    providing_ptgs, consuming_ptgs = self._filter_ptgs_by_ruleset(
        ptgs, ruleset_id)

    ruleset_in = in_name(ruleset_id)
    ruleset_out = out_name(ruleset_id)

    if not consuming_ptgs or not providing_ptgs:
        if not delete_flow:
            # Nothing to enforce yet - wait for both sides to exist
            return
        if not consuming_ptgs and not providing_ptgs:
            return
        # One side is gone - delete map entries if they exist
        for ruleset in (ruleset_in, ruleset_out):
            if ruleset in profiles:
                try:
                    self.nsx_policy.comm_map.delete(project_id, ruleset)
                except nsxlib_exc.ResourceNotFound:
                    # Already absent on backend - nothing to clean up
                    pass
        return

    if ruleset_in in profiles:
        self.nsx_policy.comm_map.create_or_overwrite(
            name=ruleset_in,
            domain_id=project_id,
            map_id=ruleset_in,
            description="GBP ruleset ingress",
            profile_id=ruleset_in,
            source_groups=consuming_ptgs,
            dest_groups=providing_ptgs)

    if ruleset_out in profiles:
        self.nsx_policy.comm_map.create_or_overwrite(
            name=ruleset_out,
            domain_id=project_id,
            map_id=ruleset_out,
            description="GBP ruleset egress",
            profile_id=ruleset_out,
            source_groups=providing_ptgs,
            dest_groups=consuming_ptgs)
def _map_group_rule_sets(self, context, group_id,
                         provided_policy_rule_sets,
                         consumed_policy_rule_sets,
                         delete_flow=False):
    """Sync communication maps for every rule set this group touches."""
    project_id = context.current['project_id']

    # ids of profiles currently existing on backend
    profiles = self.nsx_policy.comm_profile.list()
    profiles = [p['id'] for p in profiles]

    # create communication maps
    ptgs = context._plugin.get_policy_target_groups(
        context._plugin_context)

    for ruleset in provided_policy_rule_sets:
        self._map_rule_set(ptgs, profiles, project_id,
                           group_id, ruleset, delete_flow)

    for ruleset in consumed_policy_rule_sets:
        self._map_rule_set(ptgs, profiles, project_id,
                           group_id, ruleset, delete_flow)
# overrides base class, called from base group_create_postcommit
# REVISIT(annak): Suggest a better design for driver-specific callbacks,
# based on connectivity vs. security
def _set_sg_rules_for_subnets(self, context, subnets,
                              provided_policy_rule_sets,
                              consumed_policy_rule_sets):
    """No-op: inter-group security is enforced on NSX Policy, not via SGs."""
    pass

# overrides base class, called from base group_delete_postcommit
def _unset_sg_rules_for_subnets(self, context, subnets,
                                provided_policy_rule_sets,
                                consumed_policy_rule_sets):
    """No-op counterpart of _set_sg_rules_for_subnets."""
    pass

# Overrides base class
def _update_sgs_on_ptg(self, context, ptg_id,
                       provided_policy_rule_sets,
                       consumed_policy_rule_sets, op):
    """Redirect SG updates to backend communication-map updates."""
    group_id = context.current['id']
    self._map_group_rule_sets(
        context, group_id,
        provided_policy_rule_sets,
        consumed_policy_rule_sets,
        delete_flow=(op == "DISASSOCIATE"))
def create_policy_action_precommit(self, context):
    """No driver-specific validation for policy actions."""
    pass

def create_policy_action_postcommit(self, context):
    # Inherited resource-mapping behavior is sufficient
    super(NsxPolicyMappingDriver,
          self).create_policy_action_postcommit(context)

def create_policy_classifier_precommit(self, context):
    """No driver-specific validation for classifiers."""
    pass
def create_policy_classifier_postcommit(self, context):
    """Create an NSX Policy service mirroring the new classifier.

    The classifier's port range (single port '80' or range '777:888')
    is expanded into an explicit destination-port list for the backend.
    """
    classifier = context.current
    port_range = classifier['port_range']
    if port_range:
        bounds = port_range.split(':', 1)
        lower = int(bounds[0])
        upper = int(bounds[-1]) + 1
        ports = [str(p) for p in range(lower, upper)]
    else:
        # Classifiers without a port range (e.g. non-L4 protocols) used
        # to crash on None.split; create the service with no dest ports.
        ports = []

    # service entry in nsx policy has single direction
    # directions will be enforced on communication profile level
    self.nsx_policy.service.create_or_overwrite(
        name=classifier['name'],
        service_id=classifier['id'],
        description=classifier['description'],
        protocol=classifier['protocol'],
        dest_ports=ports)
def create_policy_rule_precommit(self, context):
    pass

def create_policy_rule_postcommit(self, context, transaction=None):
    # Rules are materialized on the backend via their rule set
    pass

def create_policy_rule_set_precommit(self, context):
    # Hierarchical rule sets cannot be expressed on NSX Policy
    if context.current['child_policy_rule_sets']:
        raise HierarchicalContractsNotSupported()

def create_policy_rule_set_postcommit(self, context):
    """Create backend communication profiles for the new rule set."""
    self._create_or_update_policy_rule_set(context)

def create_policy_target_precommit(self, context):
    super(NsxPolicyMappingDriver,
          self).create_policy_target_precommit(context)
def _tag_port(self, context, port_id, tag):
    """Tag the NSX logical port backing a neutron port (scope 'gbp').

    The tag expresses the port's group membership on the backend.
    """
    # Translate neutron port id to nsx port id
    _net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        context._plugin_context.session, port_id)

    self.nsx_port.update(nsx_port_id, None,
                         tags_update=[{'scope': 'gbp',
                                       'tag': tag}])

def _get_project_ptgs(self, context, project_id):
    """Return all policy target groups belonging to *project_id*."""
    ptgs = context._plugin.get_policy_target_groups(
        context._plugin_context)
    return [ptg for ptg in ptgs if ptg['project_id'] == project_id]
def create_policy_target_postcommit(self, context):
    """Tag the target's port for group membership; inherit the rest."""
    if not context.current['port_id']:
        self._use_implicit_port(context)
    self._tag_port(context,
                   context.current['port_id'],
                   context.current['policy_target_group_id'])

    # Below is inherited behaviour
    self._update_cluster_membership(
        context, new_cluster_id=context.current['cluster_id'])
    self._associate_fip_to_pt(context)

def create_policy_target_group_precommit(self, context):
    # Proxy groups are not supported by this driver
    if context.current.get('proxied_group_id'):
        raise ProxyGroupsNotSupported()

    super(NsxPolicyMappingDriver,
          self).create_policy_target_group_precommit(context)
def create_policy_target_group_postcommit(self, context):
    """Create the backend group (and domain for the first group)."""
    # create the group on backend
    group_id = context.current['id']
    project_id = context.current['project_id']

    # create the domain for this project if needed
    project_ptgs = self._get_project_ptgs(context, project_id)
    if len(project_ptgs) == 1:
        # we've just created the first group for this project
        # need to create a domain for the project on backend
        self._create_domain(context)

    self.nsx_policy.group.create_or_overwrite(
        name=context.current['name'],
        domain_id=project_id,
        group_id=group_id,
        description=context.current['description'],
        cond_val=group_id)

    # This will take care of connectivity and invoke overriden
    # callbacks defined above for security
    super(NsxPolicyMappingDriver,
          self).create_policy_target_group_postcommit(context)

def delete_policy_target_group_postcommit(self, context):
    """Delete the backend group (and domain for the last group)."""
    group_id = context.current['id']
    project_id = context.current['project_id']

    self.nsx_policy.group.delete(project_id, group_id)

    # delete the domain for this project if needed
    project_ptgs = self._get_project_ptgs(context, project_id)
    if len(project_ptgs) == 0:
        # we've just deleted the last group for this project
        # need to clean up the project domain on backend
        self._delete_domain(project_id)

    # This will take care of connectivity and invoke overriden
    # callbacks defined above for security
    super(NsxPolicyMappingDriver,
          self).delete_policy_target_group_postcommit(context)
def delete_policy_classifier_precommit(self, context):
    pass

def delete_policy_classifier_postcommit(self, context):
    """Delete the backend service mirroring this classifier."""
    classifier_id = context.current['id']
    self.nsx_policy.service.delete(classifier_id)

def delete_policy_rule_set_precommit(self, context):
    pass

def delete_policy_rule_set_postcommit(self, context):
    """Delete backend profiles for the directions this rule set used."""
    ruleset_id = context.current['id']
    rules = self.gbp_plugin.get_policy_rules(
        context._plugin_context,
        {'id': context.current['policy_rules']})

    in_rules, out_rules = self._split_rules_by_direction(context, rules)

    if in_rules:
        self._delete_comm_profile(in_name(ruleset_id))

    if out_rules:
        self._delete_comm_profile(out_name(ruleset_id))

def delete_policy_target_postcommit(self, context):
    """Release the target's FIPs and port.

    This is inherited behavior without:
    1. sg disassociation
    2. proxy handling
    """
    port_id = context.current['port_id']
    for fip in context.fips:
        self._delete_fip(context._plugin_context,
                         fip.floatingip_id)
    self._cleanup_port(context._plugin_context, port_id)
def update_policy_rule_set_precommit(self, context):
    # Shared rule sets cannot be mapped to a single backend domain
    self._reject_shared(context.current, 'policy_rule_set')

def update_policy_rule_set_postcommit(self, context):
    """Re-sync backend profiles, deleting emptied directions."""
    self._create_or_update_policy_rule_set(context, update_flow=True)

def update_policy_target_precommit(self, context):
    # Parent call verifies change of PTG is not supported
    super(NsxPolicyMappingDriver,
          self).update_policy_target_precommit(context)

def update_policy_target_postcommit(self, context):
    # Since change of PTG is not supported, nothing to add here
    super(NsxPolicyMappingDriver,
          self).update_policy_target_postcommit(context)

def update_policy_rule_precommit(self, context):
    # Rule updates would require re-syncing profiles; not implemented
    raise UpdateOperationNotSupported()

def update_policy_rule_postcommit(self, context):
    pass

def update_policy_action_precommit(self, context):
    raise UpdateOperationNotSupported()

def update_policy_classifier_precommit(self, context):
    pass

def update_policy_classifier_postcommit(self, context):
    # create_or_overwrite semantics make update identical to create
    self.create_policy_classifier_postcommit(context)

View File

@ -0,0 +1,860 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from mock import call
import webob.exc
from neutron.db import api as db_api
from neutron_lib.db import model_base
from oslo_config import cfg
from vmware_nsx.common import config
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from gbpservice.neutron.services.grouppolicy.drivers.vmware.nsx_policy import (
nsx_policy_mapping as driver)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_rmd)
TEST_PROJECT = 'test-project'
class NsxPolicyMappingTestCase(test_rmd.ResourceMappingTestCase):
    """Base test case: wires the nsx_policy driver with mocked backends."""

    def setUp(self):
        self.set_up_mocks()
        self.set_up_config()
        super(NsxPolicyMappingTestCase, self).setUp(
            policy_drivers=['implicit_policy', 'nsx_policy'])
        # REVISIT (annak): currently run with ML2 plugin
        # core_plugin='vmware_nsx.plugin.NsxV3Plugin'
        engine = db_api.context_manager.writer.get_engine()
        model_base.BASEV2.metadata.create_all(engine)
        # Grab the driver instance the plugin built, plus its backend clients
        self.driver = self._gbp_plugin.policy_driver_manager.policy_drivers[
            'nsx_policy'].obj
        self.nsx_policy = self.driver.nsx_policy
        self.nsx_port = self.driver.nsx_port
        self._tenant_id = TEST_PROJECT

    def tearDown(self):
        super(NsxPolicyMappingTestCase, self).tearDown()

    def set_up_config(self):
        """Register and populate the minimal config the driver reads."""
        cfg.CONF.register_opts(driver.policy_opts, driver.DRIVER_OPT_GROUP)
        cfg.CONF.register_opts(config.nsx_v3_opts, group="nsx_v3")
        cfg.CONF.set_override('nsx_policy_manager', '1.1.1.1',
                              driver.DRIVER_OPT_GROUP)
        cfg.CONF.set_override('nsx_api_managers', '1.1.1.1',
                              driver.NSX_V3_GROUP)

    def set_up_mocks(self):
        # Prevent any real backend connections during driver init
        mock.patch("vmware_nsxlib.v3.client.NSX3Client").start()
        mock.patch("vmware_nsxlib.v3.policy_resources"
                   ".NsxPolicyEnforcementPointApi").start()

    # Per-resource mock helpers; each returns a context-manager patch
    def _mock_domain_create(self):
        return mock.patch.object(self.nsx_policy.domain, 'create_or_overwrite')

    def _mock_domain_delete(self):
        return mock.patch.object(self.nsx_policy.domain, 'delete')

    def _mock_service_create(self):
        return mock.patch.object(self.nsx_policy.service,
                                 'create_or_overwrite')

    def _mock_service_delete(self):
        return mock.patch.object(self.nsx_policy.service, 'delete')

    def _mock_profile_create(self):
        return mock.patch.object(self.nsx_policy.comm_profile,
                                 'create_or_overwrite')

    def _mock_nth_profile_create_fails(self, n=2):
        """Patch profile create to fail only on the n-th invocation."""
        self.call_count = 1

        def raise_on_nth_call(**kwargs):
            if self.call_count == n:
                raise nsxlib_exc.ManagerError
            else:
                self.call_count += 1
        return mock.patch.object(self.nsx_policy.comm_profile,
                                 'create_or_overwrite',
                                 side_effect=raise_on_nth_call)

    def _mock_profile_delete(self):
        return mock.patch.object(self.nsx_policy.comm_profile, 'delete')

    def _mock_profile_list(self, profile_ids):
        return mock.patch.object(self.nsx_policy.comm_profile, 'list',
                                 return_value=[{'id': p}
                                               for p in profile_ids])

    def _mock_group_create(self):
        return mock.patch.object(self.nsx_policy.group, 'create_or_overwrite')

    def _mock_group_create_fails(self):
        return mock.patch.object(self.nsx_policy.group, 'create_or_overwrite',
                                 side_effect=nsxlib_exc.ManagerError)

    def _mock_group_delete(self):
        return mock.patch.object(self.nsx_policy.group, 'delete')

    def _mock_map_create(self):
        return mock.patch.object(self.nsx_policy.comm_map,
                                 'create_or_overwrite')

    def _mock_map_delete(self):
        return mock.patch.object(self.nsx_policy.comm_map, 'delete')

    def _mock_map_create_fails(self):
        return mock.patch.object(self.nsx_policy.comm_map,
                                 'create_or_overwrite',
                                 side_effect=nsxlib_exc.ManagerError)

    def _mock_nth_map_create_fails(self, n=2):
        """Patch map create to fail only on the n-th invocation."""
        self.call_count = 1

        def raise_on_nth_call(**kwargs):
            if self.call_count == n:
                raise nsxlib_exc.ManagerError
            else:
                self.call_count += 1
        return mock.patch.object(self.nsx_policy.comm_map,
                                 'create_or_overwrite',
                                 side_effect=raise_on_nth_call)

    # NOTE(review): self.policy_api is not assigned in setUp; presumably
    # set elsewhere or unused - confirm before relying on these two helpers.
    def _mock_policy_create_fails(self):
        return mock.patch.object(self.policy_api, 'create_or_overwrite',
                                 side_effect=nsxlib_exc.ManagerError)

    def _mock_policy_delete(self):
        return mock.patch.object(self.policy_api, 'delete')

    def _mock_nsx_db(self):
        # Make the neutron-port -> nsx-port translation an identity map
        def mirror_port_id(session, port_id):
            return None, port_id

        mock.patch('vmware_nsx.db.db.get_nsx_switch_and_port_id',
                   side_effect=mirror_port_id).start()

    def _mock_nsx_port_update(self):
        return mock.patch.object(self.nsx_port, 'update')
class TestPolicyClassifier(NsxPolicyMappingTestCase):
    """Classifier lifecycle: backend service create/delete calls."""

    def test_create(self):
        # Create non-first classifier within tenant
        # Should not trigger domain generation on backend
        with self._mock_service_create() as service_create_call:
            self.create_policy_classifier(name='test',
                                          protocol='TCP',
                                          port_range='80',
                                          direction='bi')

            # verify API call to create the service
            service_create_call.assert_called_with(
                name='test',
                description=mock.ANY,
                protocol='tcp',
                dest_ports=['80'],
                service_id=mock.ANY)

    def test_create_port_range(self):
        # A 'low:high' range is expanded to an explicit port list
        with self._mock_service_create() as service_create_call:
            self.create_policy_classifier(name='test',
                                          protocol='UDP',
                                          port_range='777:888',
                                          direction='in')

            port_list = [str(p) for p in range(777, 889)]
            service_create_call.assert_called_with(
                name='test',
                description=mock.ANY,
                protocol='udp',
                dest_ports=port_list,
                service_id=mock.ANY)

    def test_delete(self):
        with self._mock_service_create(),\
            self._mock_service_delete() as service_delete_call:
            classifier = self.create_policy_classifier(
                name='test',
                protocol='TCP',
                port_range='80',
                direction='bi')['policy_classifier']

            self.delete_policy_classifier(classifier['id'])
            service_delete_call.assert_called_with(classifier['id'])
class TestPolicyTargetGroup(NsxPolicyMappingTestCase):
def _prepare_rule_set(self, name='test'):
    """Create a one-rule policy rule set with backend calls mocked out."""
    with self._mock_service_create(),\
        self._mock_profile_create():
        rule = self._create_simple_policy_rule()
        return self.create_policy_rule_set(
            name=name, policy_rules=[rule['id']])['policy_rule_set']

def assert_neutron_resources(self, net_count, subnet_count, port_count):
    """Assert exact counts of spawned neutron networks/subnets/ports."""
    networks = self._plugin.get_networks(self._context)
    self.assertEqual(net_count, len(networks))

    subnets = self._plugin.get_subnets(self._context)
    self.assertEqual(subnet_count, len(subnets))

    ports = self._plugin.get_ports(self._context)
    self.assertEqual(port_count, len(ports))

def assert_neutron_rollback(self):
    """Assert no neutron resources remain after a failed operation."""
    self.assert_neutron_resources(0, 0, 0)

# Expected-call builders for backend create_or_overwrite invocations
def group_call(self, name, group_id):
    return call(domain_id=TEST_PROJECT,
                name=name,
                description=mock.ANY,
                cond_val=group_id,
                group_id=group_id)

def ingress_map_call(self, prs_id, provider_ids, consumer_ids):
    return call(domain_id=TEST_PROJECT,
                profile_id=driver.in_name(prs_id),
                map_id=mock.ANY,
                name=driver.in_name(prs_id),
                description=mock.ANY,
                source_groups=consumer_ids,
                dest_groups=provider_ids)

def egress_map_call(self, prs_id, provider_ids, consumer_ids):
    return call(domain_id=TEST_PROJECT,
                profile_id=driver.out_name(prs_id),
                map_id=mock.ANY,
                name=driver.out_name(prs_id),
                description=mock.ANY,
                source_groups=provider_ids,
                dest_groups=consumer_ids)
def test_create_first_ptg_for_project(self):
    '''Create first ptg for tenant and verify domain creation'''
    with self._mock_domain_create() as domain_create,\
        self._mock_group_create() as group_create,\
        self._mock_map_create() as map_create:

        ptg = self.create_policy_target_group(
            name='test')['policy_target_group']

        domain_create.assert_called_with(domain_id=TEST_PROJECT,
                                         name=TEST_PROJECT,
                                         description=mock.ANY)
        group_create.assert_has_calls([self.group_call('test', ptg['id'])])
        # no rule sets attached - no communication maps expected
        map_create.assert_not_called()

def _test_ptg_pair_with_single_rule(self,
                                    direction_in=True,
                                    direction_out=True):
    '''Test consumer and producer group pair with single rule lifecycle.

    Verify backend group and rule creation calls.
    Verify spawned neutron resources.
    '''
    policy_rule_set = self._prepare_rule_set()
    profile_in = driver.in_name(policy_rule_set['id'])
    profile_out = driver.out_name(policy_rule_set['id'])

    profile_ids = []
    if direction_in:
        profile_ids.append(profile_in)
    if direction_out:
        profile_ids.append(profile_out)

    # Create group pair
    with self._mock_group_create() as group_create,\
        self._mock_profile_list(profile_ids),\
        self._mock_map_create() as map_create,\
        self._mock_domain_create():

        provider_ptg, consumer_ptg = self._create_provider_consumer_ptgs(
            policy_rule_set['id'])

        # validate group creation on backend
        calls = [self.group_call('ptg1', provider_ptg),
                 self.group_call('ptg2', consumer_ptg)]
        group_create.assert_has_calls(calls)

        # validate communication map creation on backend
        calls = []
        if direction_in:
            calls.append(self.ingress_map_call(policy_rule_set['id'],
                                               [provider_ptg],
                                               [consumer_ptg]))
        if direction_out:
            calls.append(self.egress_map_call(policy_rule_set['id'],
                                              [provider_ptg],
                                              [consumer_ptg]))
        map_create.assert_has_calls(calls)

        # validate neutron resources
        self.assert_neutron_resources(2, 2, 2)

    # Delete producer
    with self._mock_map_delete() as map_delete,\
        self._mock_profile_list(profile_ids),\
        self._mock_group_delete() as group_delete,\
        self._mock_domain_delete() as domain_delete:

        self.delete_policy_target_group(provider_ptg)

        # verify communication map delete on backend
        calls = []
        if direction_in:
            calls.append(call(TEST_PROJECT,
                              driver.in_name(policy_rule_set['id'])))
        if direction_out:
            calls.append(call(TEST_PROJECT,
                              driver.out_name(policy_rule_set['id'])))
        map_delete.assert_has_calls(calls)

        # verify group delete call
        group_delete.assert_called_with(TEST_PROJECT, provider_ptg)

        # verify domain not deleted yet
        domain_delete.assert_not_called()

    # Delete consumer
    with self._mock_map_delete() as map_delete,\
        self._mock_profile_list(profile_ids),\
        self._mock_group_delete() as group_delete,\
        self._mock_domain_delete() as domain_delete:

        self.delete_policy_target_group(consumer_ptg)

        # no deletions on communication map are expected
        map_delete.assert_not_called()

        # verify group delete call
        group_delete.assert_called_with(TEST_PROJECT, consumer_ptg)

        # last group is deleted, domain should go as well
        domain_delete.assert_called_with(TEST_PROJECT)
    def test_create_ptg_pair_with_single_rule_in(self):
        # ingress-only rule set: only the ingress map is expected on backend
        self._test_ptg_pair_with_single_rule(True, False)
    def test_create_ptg_pair_with_single_rule_out(self):
        # egress-only rule set: only the egress map is expected on backend
        self._test_ptg_pair_with_single_rule(False, True)
    def test_create_ptg_pair_with_single_rule_bi(self):
        # bidirectional rule set: both ingress and egress maps are expected
        self._test_ptg_pair_with_single_rule(True, True)
def test_create_fail_isolated(self):
'''Verify integrity when backend fails on isolated group creation.
Verify backend receives a group delete call.
Verify spawned neutron resources are cleaned up.
'''
policy_rule_set = self._prepare_rule_set()
with self._mock_domain_create(),\
self._mock_group_create_fails(),\
self._mock_group_delete() as group_delete,\
self._mock_domain_delete() as domain_delete:
self.assertRaises(webob.exc.HTTPClientError,
self._create_provider_consumer_ptgs,
policy_rule_set['id'])
group_delete.assert_called_with(self._tenant_id,
mock.ANY)
# verify domain deletion since group failed to create
domain_delete.assert_called_with(TEST_PROJECT)
self.assert_neutron_rollback()
def test_create_fail_connected(self):
'''Verify integrity when backend fails on connectivity map creation
This test creates a pair of groups. First group creation succeeds,
while second fails on connectivity enforcement.
Verify backend receives a group delete call for second group.
Verify spawned neutron resources are cleaned up for second group.
'''
policy_rule_set = self._prepare_rule_set()
profile_ids = [driver.in_name(policy_rule_set['id']),
driver.out_name(policy_rule_set['id'])]
with self._mock_group_create(),\
self._mock_profile_list(profile_ids),\
self._mock_map_create_fails(),\
self._mock_group_delete() as group_delete:
self.assertRaises(webob.exc.HTTPClientError,
self._create_provider_consumer_ptgs,
policy_rule_set['id'])
group_delete.assert_called_with(self._tenant_id, mock.ANY)
self.assert_neutron_resources(1, 1, 1)
def test_create_fail_multi_connected(self):
'''Verify integrity when backend fails on connectivity map creation
This test creates three groups a<-->b<==>c
B is created last, and creation fails on its last connectivity
enforcement.
Verify all maps are deleted in cleanup.
Verify spawned neutron resources are cleaned up for third group.
'''
prs1 = self._prepare_rule_set()['id']
prs2 = self._prepare_rule_set()['id']
prs3 = self._prepare_rule_set()['id']
profile_ids = [driver.in_name(prs1), driver.out_name(prs1),
driver.in_name(prs2), driver.out_name(prs2),
driver.in_name(prs3), driver.out_name(prs3)]
# Create a and c
with self._mock_group_create(),\
self._mock_profile_list(profile_ids),\
self._mock_map_create():
ab_dict = {prs1: None}
bc_dict = {prs2: None, prs3: None}
a = self.create_policy_target_group(
name='a',
provided_policy_rule_sets=ab_dict)['policy_target_group']['id']
c = self.create_policy_target_group(
name='c',
consumed_policy_rule_sets=bc_dict)['policy_target_group']['id']
with self._mock_group_create(),\
self._mock_profile_list(profile_ids),\
self._mock_nth_map_create_fails(n=6) as map_create,\
self._mock_map_delete() as map_delete,\
self._mock_group_delete() as group_delete:
self.assertRaises(webob.exc.HTTPClientError,
self.create_policy_target_group,
name='c',
consumed_policy_rule_sets=ab_dict,
provided_policy_rule_sets=bc_dict)
b = mock.ANY
map_create_calls = [self.ingress_map_call(prs1, [a], [b]),
self.egress_map_call(prs1, [a], [b]),
self.ingress_map_call(prs2, [b], [c]),
self.egress_map_call(prs2, [b], [c]),
self.ingress_map_call(prs3, [b], [c]),
self.egress_map_call(prs3, [b], [c])]
map_create.assert_has_calls(map_create_calls, any_order=True)
map_delete_calls = [call(TEST_PROJECT, driver.in_name(prs1)),
call(TEST_PROJECT, driver.out_name(prs1)),
call(TEST_PROJECT, driver.in_name(prs2)),
call(TEST_PROJECT, driver.out_name(prs2)),
call(TEST_PROJECT, driver.in_name(prs3))]
map_delete.assert_has_calls(map_delete_calls, any_order=True)
group_delete.assert_called_with(TEST_PROJECT, mock.ANY)
self.assert_neutron_resources(2, 2, 2)
def test_create_ptg_pair_multi_rule_set(self):
'''Create ptg pair based on 3 rule sets
First rule set is simulated to have only ingress connectivity,
second - only egress connectivity, and third - both
'''
prs1 = self._prepare_rule_set()['id']
prs2 = self._prepare_rule_set()['id']
prs3 = self._prepare_rule_set()['id']
profile_ids = [driver.in_name(prs1),
driver.out_name(prs2),
driver.in_name(prs3),
driver.out_name(prs3)]
with self._mock_domain_create(),\
self._mock_group_create() as group_create,\
self._mock_profile_list(profile_ids),\
self._mock_map_create() as map_create:
rule_set_dict = {prs1: None, prs2: None, prs3: None}
provider_ptg = self.create_policy_target_group(
name='ptg1', provided_policy_rule_sets=rule_set_dict)
provider_id = provider_ptg['policy_target_group']['id']
consumer_ptg = self.create_policy_target_group(
name='ptg2', consumed_policy_rule_sets=rule_set_dict)
consumer_id = consumer_ptg['policy_target_group']['id']
group_create.assert_has_calls(
[self.group_call('ptg1', provider_id),
self.group_call('ptg2', consumer_id)])
map_calls = [
self.ingress_map_call(prs1, [provider_id], [consumer_id]),
self.egress_map_call(prs2, [provider_id], [consumer_id]),
self.ingress_map_call(prs3, [provider_id], [consumer_id]),
self.egress_map_call(prs3, [provider_id], [consumer_id])]
map_create.assert_has_calls(map_calls, any_order=True)
    def test_create_ptg_ring(self):
        '''Ring topology lifecycle: N groups, each providing one rule set
        and consuming the next group's rule set, closing back to group 0.

        Verifies map creation during ring build-up, map cleanup when a
        group is deleted, and map cleanup when a rule set is
        disassociated via group update.
        '''
        ring_size = 10
        prs_ids = []
        for i in range(0, ring_size):
            prs_ids.append(self._prepare_rule_set()['id'])
        # only ingress profiles are simulated to exist on backend
        profile_ids = [driver.in_name(prs_id) for prs_id in prs_ids]
        # Create ring topology
        with self._mock_domain_create(),\
            self._mock_profile_list(profile_ids),\
            self._mock_group_create() as group_create,\
            self._mock_map_create() as map_create:
            group_calls = []
            map_calls = []
            ptg_ids = []
            for i in range(0, ring_size):
                # group i provides rule set i and consumes rule set i+1
                provided_rule_set_dict = {prs_ids[i]: None}
                next_i = (i + 1) % ring_size
                consumed_rule_set_dict = {prs_ids[next_i]: None}
                name = 'ptg_%d' % i
                ptg = self.create_policy_target_group(
                    name=name,
                    provided_policy_rule_sets=provided_rule_set_dict,
                    consumed_policy_rule_sets=consumed_rule_set_dict)
                ptg_id = ptg['policy_target_group']['id']
                ptg_ids.append(ptg_id)
                group_calls.append(self.group_call(name, ptg_id))
                if i > 0:
                    # map to the previously created neighbour appears
                    # once both ends of the edge exist
                    map_calls.append(self.ingress_map_call(
                        prs_ids[i],
                        [ptg_id],
                        [ptg_ids[i - 1]]))
            # the last created group closes the ring back to group 0
            # (ptg_id still holds the last group's id here)
            map_calls.append(self.ingress_map_call(prs_ids[0],
                                                   [ptg_ids[0]],
                                                   [ptg_id]))
            group_create.assert_has_calls(group_calls)
            map_create.assert_has_calls(map_calls, any_order=True)
            self.assert_neutron_resources(ring_size, ring_size, ring_size)
        # Delete single group and verify connectors are deleted
        with self._mock_map_delete() as map_delete,\
            self._mock_map_create() as map_create,\
            self._mock_profile_list(profile_ids),\
            self._mock_group_delete() as group_delete:
            ptg_id = ptg_ids[2]
            self.delete_policy_target_group(ptg_id)
            # both maps touching group 2 are removed: the one it
            # provided (prs 2) and the one it consumed (prs 3)
            map_calls = [call(TEST_PROJECT, driver.in_name(prs_ids[2])),
                         call(TEST_PROJECT, driver.in_name(prs_ids[3]))]
            map_delete.assert_has_calls(map_calls)
            map_create.assert_not_called()
            group_delete.assert_called_with(TEST_PROJECT, ptg_id)
        # Remove connectors from single group
        with self._mock_map_delete() as map_delete,\
            self._mock_map_create() as map_create,\
            self._mock_profile_list(profile_ids),\
            self._mock_group_delete() as group_delete:
            ptg_id = ptg_ids[5]
            self.update_policy_target_group(
                ptg_id, provided_policy_rule_sets={})
            # only the provided map is deleted; the group itself stays
            map_calls = [call(TEST_PROJECT, driver.in_name(prs_ids[5]))]
            map_delete.assert_has_calls(map_calls)
            map_create.assert_not_called()
            group_delete.assert_not_called()
    def test_create_ptg_star(self):
        '''Star-like topology (single producer and N consumers) lifecycle'''
        star_size = 10
        policy_rule_set = self._prepare_rule_set()
        prs_id = policy_rule_set['id']
        # only the ingress profile is simulated to exist on backend
        profile_ids = [driver.in_name(prs_id)]
        # Create topology
        with self._mock_domain_create(),\
            self._mock_profile_list(profile_ids),\
            self._mock_group_create() as group_create,\
            self._mock_map_create() as map_create:
            policy_rule_set_dict = {prs_id: None}
            provider_ptg = self.create_policy_target_group(
                name='producer',
                provided_policy_rule_sets=policy_rule_set_dict)
            provider_id = provider_ptg['policy_target_group']['id']
            group_calls = [self.group_call('producer', provider_id)]
            map_calls = []
            consumer_ids = []
            for i in range(0, star_size):
                name = 'consumer_%d' % i
                consumer_ptg = self.create_policy_target_group(
                    name=name,
                    consumed_policy_rule_sets=policy_rule_set_dict)
                consumer_id = consumer_ptg['policy_target_group']['id']
                consumer_ids.append(consumer_id)
                group_calls.append(self.group_call(name, consumer_id))
                # each new consumer re-creates the map with the full
                # consumer list accumulated so far; [:] snapshots the
                # list so later appends don't mutate the expected call
                map_calls.append(self.ingress_map_call(
                    prs_id,
                    [provider_id],
                    consumer_ids[:]))
            group_create.assert_has_calls(group_calls)
            map_create.assert_has_calls(map_calls)
            # star_size consumers plus the single producer
            star_size += 1
            self.assert_neutron_resources(star_size, star_size, star_size)
        # Delete one consumer group
        with self._mock_map_delete() as map_delete,\
            self._mock_map_create() as map_create,\
            self._mock_profile_list(profile_ids),\
            self._mock_group_delete() as group_delete:
            consumer_id = consumer_ids.pop(0)
            self.delete_policy_target_group(consumer_id)
            # consumers remain, so the map is re-created with the
            # remaining consumer list rather than deleted
            map_create.assert_has_calls(
                [self.ingress_map_call(prs_id,
                                       [provider_id],
                                       consumer_ids)])
            map_delete.assert_not_called()
            group_delete.assert_called_with(TEST_PROJECT, consumer_id)
            star_size -= 1
            self.assert_neutron_resources(star_size, star_size, star_size)
        # Delete provider group
        with self._mock_map_delete() as map_delete,\
            self._mock_map_create() as map_create,\
            self._mock_profile_list(profile_ids),\
            self._mock_group_delete() as group_delete:
            self.delete_policy_target_group(provider_id)
            # provider gone - the ingress map is deleted, not updated
            map_create.assert_not_called()
            map_delete.assert_called_with(TEST_PROJECT, driver.in_name(prs_id))
            star_size -= 1
            group_delete.assert_called_with(TEST_PROJECT, provider_id)
class TestPolicyRuleSet(NsxPolicyMappingTestCase):
    '''Verify policy rule set mapping to NSX communication profiles.

    A rule set maps to up to two communication profiles on the backend,
    one per direction (ingress/egress), depending on rule directions.
    '''

    def test_bidirectional(self):
        '''Create and delete bidirectional rule set'''
        with self._mock_profile_create() as profile_create,\
            self._mock_profile_delete() as profile_delete:
            rule = self._create_simple_policy_rule()
            rule_set = self.create_policy_rule_set(
                name='test', policy_rules=[rule['id']])['policy_rule_set']
            # a bidirectional rule spawns both ingress and egress profiles
            calls = [call(name=driver.in_name('test'),
                          description=mock.ANY,
                          profile_id=driver.in_name(rule_set['id']),
                          services=[rule['policy_classifier_id']]),
                     call(name=driver.out_name('test'),
                          description=mock.ANY,
                          profile_id=driver.out_name(rule_set['id']),
                          services=[rule['policy_classifier_id']])]
            profile_create.assert_has_calls(calls)
            self.delete_policy_rule_set(rule_set['id'])
            # both profiles are deleted together with the rule set
            calls = [call(driver.in_name(rule_set['id'])),
                     call(driver.out_name(rule_set['id']))]
            profile_delete.assert_has_calls(calls)

    def test_empty(self):
        '''Create and delete empty rule set and verify no backend calls'''
        # NOTE(review): no profile mocks are installed here, so any
        # backend profile call would hit the unmocked API - the test
        # passing implies no backend calls were made
        rule = self._create_simple_policy_rule()
        rule_set = self.create_policy_rule_set(
            name='test', policy_rules=[rule['id']])['policy_rule_set']
        self.delete_policy_rule_set(rule_set['id'])

    def test_create_fails(self):
        '''Create bidirectional rule set and fail second API call'''
        with self._mock_nth_profile_create_fails() as profile_create,\
            self._mock_profile_delete() as profile_delete:
            rule = self._create_simple_policy_rule()
            self.assertRaises(webob.exc.HTTPClientError,
                              self.create_policy_rule_set,
                              name='test',
                              policy_rules=[rule['id']])
            # Two create calls expected
            calls = [call(name=driver.in_name('test'),
                          description=mock.ANY,
                          profile_id=mock.ANY,
                          services=[rule['policy_classifier_id']]),
                     call(name=driver.out_name('test'),
                          description=mock.ANY,
                          profile_id=mock.ANY,
                          services=[rule['policy_classifier_id']])]
            profile_create.assert_has_calls(calls)
            # Rollback - two delete calls expected
            calls = [call(mock.ANY), call(mock.ANY)]
            profile_delete.assert_has_calls(calls)

    def _assert_profile_call(self, mock_calls, name, profile_id, services):
        '''Fail the test unless a matching profile call was recorded.

        Matches a recorded call on ``mock_calls`` by name, profile id and
        service list; services may come in any order, hence the set
        comparison. Previously this compared ``call`` entries against
        dict, which never matched - entries of ``call_args_list`` are
        (args, kwargs) tuples, with kwargs at index 1.
        '''
        services_set = set(services)
        for profile_call in mock_calls.call_args_list:
            kwargs = profile_call[1]
            if (kwargs.get('name') == name and
                kwargs.get('profile_id') == profile_id and
                set(kwargs.get('services', ())) == services_set):
                return True
        self.fail('Profile call with name %s, id %s and services %s '
                  'not found' % (name, profile_id, services))

    def test_multi_set(self):
        '''Test lifecycle of set with 3 rules having different dirs'''
        # Create rule set with 3 rules
        with self._mock_profile_create() as profile_create:
            rule1 = self._create_simple_policy_rule('in', 'tcp', '7887')
            rule2 = self._create_simple_policy_rule('out', 'udp', '8778')
            rule3 = self._create_simple_policy_rule('bi', 'tcp', '5060')
            rule_set = self.create_policy_rule_set(
                name='test', policy_rules=[rule1['id'],
                                           rule2['id'],
                                           rule3['id']])['policy_rule_set']
            self.assertEqual(2, profile_create.call_count)
            # ingress profile covers rules 1 and 3, egress - rules 2 and 3
            # (was profile_create._assert_profile_call(...) - a no-op
            # attribute call on a Mock; must go through self)
            self._assert_profile_call(
                profile_create,
                driver.in_name('test'),
                driver.in_name(rule_set['id']),
                [rule1['policy_classifier_id'], rule3['policy_classifier_id']])
            self._assert_profile_call(
                profile_create,
                driver.out_name('test'),
                driver.out_name(rule_set['id']),
                [rule2['policy_classifier_id'], rule3['policy_classifier_id']])
        # Replace rule3 with rule4
        with self._mock_profile_create() as profile_update:
            rule4 = self._create_simple_policy_rule('out', 'tcp', '555:777')
            rule_set1 = self.update_policy_rule_set(
                rule_set['id'], policy_rules=[rule1['id'],
                                              rule2['id'],
                                              rule4['id']])['policy_rule_set']
            self.assertEqual(rule_set['id'], rule_set1['id'])
            # both directions still hold rules, hence two update calls
            # (was asserting profile_create.call_count again - a
            # copy-paste slip re-verifying the previous mock)
            self.assertEqual(2, profile_update.call_count)
            self._assert_profile_call(
                profile_update,
                driver.in_name('test'),
                driver.in_name(rule_set['id']),
                [rule1['policy_classifier_id']])
            self._assert_profile_call(
                profile_update,
                driver.out_name('test'),
                driver.out_name(rule_set['id']),
                [rule2['policy_classifier_id'], rule4['policy_classifier_id']])
        # Delete rule1 from the rule set and verify ingress profile
        # is deleted on backend
        with self._mock_profile_delete() as profile_delete:
            self.update_policy_rule_set(rule_set['id'],
                                        policy_rules=[rule2['id'],
                                                      rule4['id']])
            profile_delete.assert_called_once_with(
                driver.in_name(rule_set['id']))
        # Delete the rule set and verify egress profile is deleted
        with self._mock_profile_delete() as profile_delete:
            self.delete_policy_rule_set(rule_set['id'])
            profile_delete.assert_called_once_with(
                driver.out_name(rule_set['id']))
class TestPolicyTargetTag(NsxPolicyMappingTestCase):
    '''Verify policy target mapping to a port tag on the backend.'''

    def _prepare_group(self, name='test'):
        '''Create a policy target group with backend group calls mocked.

        :param name: group name. Previously ignored - the group was
                     always created as 'test' regardless of the argument.
        '''
        with self._mock_group_create():
            return self.create_policy_target_group(
                name=name)['policy_target_group']

    def test_target_lifecycle(self):
        self._mock_nsx_db()
        ptg = self._prepare_group()
        # create policy target and verify port tag update
        with self._mock_nsx_port_update() as port_update:
            target = self.create_policy_target(
                policy_target_group_id=ptg['id'])['policy_target']
            # nsx mock function will map neutron port id to same value
            # for nsx port id
            port_update.assert_called_once_with(
                target['port_id'],
                None,
                tags_update=[{'scope': 'gbp',
                              'tag': ptg['id']}])
        # verify group membership change is not supported
        ptg1 = self._prepare_group()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.update_policy_target,
                          target['id'],
                          policy_target_group_id=ptg1['id'])
        # policy target deletion should not affect backend policy-wise
        self.delete_policy_target(target['id'])

View File

@ -63,6 +63,7 @@ gbpservice.neutron.group_policy.policy_drivers =
chain_mapping = gbpservice.neutron.services.grouppolicy.drivers.chain_mapping:ChainMappingDriver
aim_mapping = gbpservice.neutron.services.grouppolicy.drivers.cisco.apic.aim_mapping:AIMMappingDriver
apic = gbpservice.neutron.services.grouppolicy.drivers.cisco.apic.apic_mapping:ApicMappingDriver
nsx_policy = gbpservice.neutron.services.grouppolicy.drivers.vmware.nsx_policy.nsx_policy_mapping:NsxPolicyMappingDriver
neutron.ml2.mechanism_drivers =
logger_plus = gbpservice.neutron.tests.unit.plugins.ml2plus.drivers.mechanism_logger:LoggerPlusMechanismDriver
apic_aim = gbpservice.neutron.plugins.ml2plus.drivers.apic_aim.mechanism_driver:ApicMechanismDriver

View File

@ -9,6 +9,9 @@
-e git+https://github.com/noironetworks/python-opflex-agent.git@sumit/stable/ocata#egg=python-opflexagent-agent
-e git+https://github.com/noironetworks/apic-ml2-driver.git@sumit/ocata#egg=apic_ml2
-e git+https://github.com/openstack/vmware-nsx.git@stable/ocata#egg=vmware_nsx
-e git+https://github.com/openstack/vmware-nsxlib.git@master#egg=vmware_nsxlib
-e git+https://git.openstack.org/openstack/python-group-based-policy-client@master#egg=gbpclient
-e git+https://git.openstack.org/openstack/neutron-vpnaas@stable/ocata#egg=neutron-vpnaas
-e git+https://git.openstack.org/openstack/neutron-lbaas@stable/ocata#egg=neutron-lbaas